// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"

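/*
 * Buffers that should end up in one MLLI table are first collected in a
 * buffer_array: each slot holds an SG list (or a single DMA buffer) together
 * with its length, offset, entry count and a pointer used to report back how
 * many MLLI entries it produced.
 */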
union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)

/**
 * cc_copy_mac() - Copy MAC to temporary location
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return: Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 * @dest: Buffer to copy to/from
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (true == from SG list to buffer, false == from
 *          buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

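/*
 * Render one contiguous DMA buffer into MLLI entries, splitting it into
 * CC_MAX_MLLI_ENTRY_SIZE chunks and advancing *curr_nents for every entry
 * written.
 */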
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}

	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

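/*
 * Walk an SG list and render up to sgl_data_len bytes of it, starting at
 * sgl_offset, into MLLI entries via cc_render_buff_to_mlli().
 */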
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;
		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

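/*
 * Allocate an MLLI table from the DMA pool and render every buffer queued in
 * sg_data into it, reporting per-buffer entry counts where a counter was
 * registered.
 */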
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

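/*
 * Queue one SG list in the buffer_array so that a later cc_generate_mlli()
 * call can fold all queued buffers into a single MLLI table.
 */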
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

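/*
 * dma_map_sg() wrapper: compute the number of entries needed for nbytes,
 * reject SG lists that exceed max_sg_nents and return both the logical and
 * the actually mapped entry counts.
 */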
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		return -ENOMEM;
	}

	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int cc_set_aead_conf_buf(struct device *dev,
				struct aead_req_ctx *areq_ctx,
				u8 *config_data, struct buffer_array *sg_data,
				unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

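/*
 * Release every DMA mapping taken by cc_map_cipher_request(): the IV, the
 * MLLI table (if one was built) and the src/dst scatterlists.
 */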
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

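/*
 * DMA-map a cipher request: map the IV and the src (and, for non-in-place
 * operations, dst) SG lists, switching from DLLI to an MLLI table whenever
 * more than one entry was mapped.
 */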
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

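/*
 * Undo everything cc_map_aead_request() mapped: MAC buffer, CCM/GCM helper
 * blocks, generated IV, MLLI table and the src/dst scatterlists.
 */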
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

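/*
 * Duplicate the request IV into a DMA-able buffer and map it, so the HW can
 * fetch it right after the associated data.
 */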
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

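/*
 * Classify the associated data as NULL/DLLI/MLLI and, when an MLLI table is
 * needed (or chaining was explicitly requested), queue it in sg_data.
 */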
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	unsigned int offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

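/*
 * MLLI variant of the above: queue src/dst in sg_data and work out where the
 * ICV lives, backing it up to a side buffer whenever it is fragmented across
 * SG entries.
 */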
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			/* Contig. ICV */
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

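/*
 * Map and queue the data portion of the request: skip over the associated
 * data in src/dst, bound the fragment counts and pick DLLI or MLLI handling
 * for the data and its ICV.
 */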
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

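/*
 * Top-level AEAD mapping: map the MAC/CCM/GCM helper buffers and the src/dst
 * SG lists, build the assoc/IV/data chains for single- or double-pass flows
 * and generate the MLLI table when any of them needs one.
 */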
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for cipher, remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			     req->cryptlen :
			     (req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* Mlli support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

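/*
 * Map the data for a final hash operation: the buffered partial block (if
 * any) plus the request SG list, producing either a DLLI entry or an MLLI
 * table.
 */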
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

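/*
 * Map the data for a hash update: anything smaller than a block is only
 * buffered, the residue is carried over to the next buffer and whole blocks
 * are mapped for the HW as DLLI or MLLI.
 */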
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

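/*
 * Release the hash request mappings (MLLI table, src SG list, buffered
 * block); on revert, swap the buffer index back instead of clearing the
 * previous length.
 */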
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

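/*
 * Create the DMA pool used for MLLI tables, sized for the largest table the
 * driver will ever build.
 */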
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);

	return 0;
}