// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

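/*
 * Note on the two DMA descriptor styles used below: a request whose data is
 * a single contiguous DMA segment is described directly to the hardware
 * (DLLI), while fragmented data is first collected into a struct
 * buffer_array and then rendered into an MLLI table of address/size entries
 * allocated from the driver's DMA pool. The buffer_array above is only a
 * staging accumulator; nothing hardware-visible is written until
 * cc_generate_mlli() runs.
 */
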
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/*
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = areq_ctx->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/*
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}
	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/*
 * cc_zero_sgl() - Zero scatter list data.
 */
void cc_zero_sgl(struct scatterlist *sgl, u32 data_len)
{
	struct scatterlist *current_sg = sgl;
	u32 sg_index = 0;

	while (sg_index <= data_len) {
		if (!current_sg) {
			/* reached the end of the sgl --> just return back */
			return;
		}
		memset(sg_virt(current_sg), 0, current_sg->length);
		sg_index += current_sg->length;
		current_sg = sg_next(current_sg);
	}
}

/*
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents, lbytes;

	nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

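/*
 * cc_copy_sg_portion() above is the generic byte copier between a
 * scatterlist window and a flat buffer; cc_copy_mac() uses it to back up
 * (or restore) the authentication tag around in-place AEAD operations.
 */
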
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow*/
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
		return -ENOMEM;

	/*handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

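/*
 * Worked example (illustrative numbers only): assuming CC_MAX_MLLI_ENTRY_SIZE
 * is the 64 kbyte limit the comment above refers to, a single 150 KB
 * contiguous buffer rendered by cc_render_buff_to_mlli() becomes three LLI
 * entries of 64 KB, 64 KB and 22 KB, each written as an address word
 * (LLI_WORD0) and a size word (LLI_WORD1), and *curr_nents is advanced by
 * three.
 */
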
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
						  offset, &total_nents,
						  &mlli_p);
		else /*DMA_BUFF_TYPE*/
			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
						    tot_len, &total_nents,
						    &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

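/*
 * cc_generate_mlli() above is the single point where the staged buffer_array
 * is turned into one MLLI table: each staged buffer is rendered either from
 * a raw DMA address or from a scatterlist, and the per-buffer mlli_nents
 * pointers are updated so callers can later split the table between the
 * assoc, src and dst regions.
 */
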
static void cc_add_buffer_entry(struct device *dev,
				struct buffer_array *sgl_data,
				dma_addr_t buffer_dma, unsigned int buffer_len,
				bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
		index, &buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

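/*
 * cc_map_sg() below decides between the two descriptor styles: a
 * single-entry scatterlist is DMA mapped directly (DLLI candidate), while
 * longer lists are counted with cc_get_sgl_nents(), bounded by the caller's
 * max_sg_nents and mapped as a whole; callers treat mapped_nents > 1 as the
 * trigger to switch to MLLI.
 */
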
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else {  /*sg_is_last*/
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			*nents = 0;
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			return -ENOMEM;
		}
		/* In case of mmu the number of mapped nents might
		 * be changed from the original sgl nents
		 */
		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
		if (*mapped_nents == 0) {
			*nents = 0;
			dev_err(dev, "dma_map_sg() sg buffer failed\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static int cc_set_aead_conf_buf(struct device *dev,
				struct aead_req_ctx *areq_ctx,
				u8 *config_data, struct buffer_array *sg_data,
				unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0)
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_TO_DEVICE);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

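/*
 * The cipher mapping path below works in three steps: map the IV with
 * dma_map_single(), map src (and dst when not operating in place) with
 * cc_map_sg(), and, only if any mapping produced more than one entry, stage
 * the scatterlists in sg_data and build an MLLI table from the pool. Any
 * failure funnels through cc_unmap_cipher_request() to release what was
 * already mapped.
 */
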
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
	u32 dummy;
	u32 size_to_unmap = 0;

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);
	size_to_unmap = areq_ctx->assoclen + req->cryptlen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_unmap += areq_ctx->req_authsize;
	if (areq_ctx->is_gcm4543)
		size_to_unmap += crypto_aead_ivsize(tfm);

	dma_unmap_sg(dev, req->src,
		     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst,
			     cc_get_sgl_nents(dev, req->dst, size_to_unmap,
					      &dummy),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

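/*
 * cc_get_aead_icv_nents() below classifies how the authentication tag (ICV)
 * lies in the trailing scatterlist entries: fully inside the last entry
 * (not fragmented, 0 or 1 entries to account for) or split across the last
 * two entries (fragmented), in which case the callers fall back to a
 * CPU-side MAC compare against a backed-up copy.
 */
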
static int cc_get_aead_icv_nents(struct device *dev, struct scatterlist *sgl,
				 unsigned int sgl_nents, unsigned int authsize,
				 u32 last_entry_data_size,
				 bool *is_icv_fragmented)
{
	unsigned int icv_max_size = 0;
	unsigned int icv_required_size = authsize > last_entry_data_size ?
					(authsize - last_entry_data_size) :
					authsize;
	int nents = 0;
	unsigned int i;

	if (sgl_nents < MAX_ICV_NENTS_SUPPORTED) {
		*is_icv_fragmented = false;
		return 0;
	}

	/* advance to the last MAX_ICV_NENTS_SUPPORTED entries */
	for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
		sgl = sg_next(sgl);
	}

	icv_max_size = sgl->length;

	if (last_entry_data_size > authsize) {
		/* ICV attached to data in last entry (not fragmented!) */
		nents = 0;
		*is_icv_fragmented = false;
	} else if (last_entry_data_size == authsize) {
		/* ICV placed in whole last entry (not fragmented!) */
		nents = 1;
		*is_icv_fragmented = false;
	} else if (icv_max_size > icv_required_size) {
		nents = 1;
		*is_icv_fragmented = true;
	} else if (icv_max_size == icv_required_size) {
		nents = 2;
		*is_icv_fragmented = true;
	} else {
		dev_err(dev, "Unsupported num. of ICV fragments (> %d)\n",
			MAX_ICV_NENTS_SUPPORTED);
		nents = -1; /*unsupported*/
	}
	dev_dbg(dev, "is_frag=%s icv_nents=%u\n",
		(*is_icv_fragmented ? "true" : "false"), nents);

	return nents;
}

static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}

static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	u32 mapped_nents = 0;
	struct scatterlist *current_sg = req->src;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int sg_index = 0;
	u32 size_of_assoc = areq_ctx->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	//iterate over the sgl to see how many entries are for associated data
	//it is assumed that if we reach here, the sgl is already mapped
	sg_index = current_sg->length;
	//the first entry in the scatter list contains all the associated data
	if (sg_index > size_of_assoc) {
		mapped_nents++;
	} else {
		while (sg_index <= size_of_assoc) {
			current_sg = sg_next(current_sg);
			/* if have reached the end of the sgl, then this is
			 * unexpected
			 */
			if (!current_sg) {
				dev_err(dev, "reached end of sg list. unexpected\n");
				return -EINVAL;
			}
			sg_index += current_sg->length;
			mapped_nents++;
		}
	}
	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

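/*
 * The DLLI variant below only records where the ICV already sits: for
 * in-place operations and for non-in-place decrypt it is taken from the
 * source scatterlist, for non-in-place encrypt from the destination, in all
 * cases at an offset of (*last_bytes - authsize) into the last data entry.
 */
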
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;

	areq_ctx->is_icv_fragmented = false;
	if (req->src == req->dst) {
		/*INPLACE*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->src_sgl) +
			(*src_last_bytes - authsize);
	} else {
		/*NON-INPLACE and ENCRYPT*/
		areq_ctx->icv_dma_addr = sg_dma_address(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
		areq_ctx->icv_virt_addr = sg_virt(areq_ctx->dst_sgl) +
			(*dst_last_bytes - authsize);
	}
}

static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				     struct aead_request *req,
				     struct buffer_array *sg_data,
				     u32 *src_last_bytes, u32 *dst_last_bytes,
				     bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0, icv_nents;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/*INPLACE*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->src_sgl,
						  areq_ctx->src.nents,
						  authsize, *src_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/*NON-INPLACE and ENCRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		icv_nents = cc_get_aead_icv_nents(dev, areq_ctx->dst_sgl,
						  areq_ctx->dst.nents,
						  authsize, *dst_last_bytes,
						  &areq_ctx->is_icv_fragmented);
		if (icv_nents < 0) {
			rc = -ENOTSUPP;
			goto prepare_data_mlli_exit;
		}

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}

prepare_data_mlli_exit:
	return rc;
}

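/*
 * cc_aead_chain_data() below walks req->src (and req->dst when different)
 * past the associated data (and the GCM4543 IV when present) to find where
 * the payload starts, counts the remaining entries, and then picks either
 * the DLLI fast path or the MLLI path depending on fragmentation and on
 * do_chain.
 */
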
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = areq_ctx->assoclen;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->src_sgl->length;
		areq_ctx->src_sgl = sg_next(areq_ctx->src_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->src_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->src_sgl->length;
		src_mapped_nents--;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = areq_ctx->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (sg_index <= size_to_skip) {
		offset -= areq_ctx->dst_sgl->length;
		areq_ctx->dst_sgl = sg_next(areq_ctx->dst_sgl);
		//if have reached the end of the sgl, then this is unexpected
		if (!areq_ctx->dst_sgl) {
			dev_err(dev, "reached end of sg list. unexpected\n");
			return -EINVAL;
		}
		sg_index += areq_ctx->dst_sgl->length;
		dst_mapped_nents--;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		rc = cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					       &src_last_bytes,
					       &dst_last_bytes, is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/*Inplace case dst nents equal to src nents*/
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
							curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

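/*
 * cc_map_aead_request() below is the top-level AEAD mapping routine. In the
 * single-pass flow one MLLI table covers the assoc data and the src/dst
 * data; in the double-pass flow the assoc table is built first, the IV
 * entry is chained right after it, and the data table(s) follow, as spelled
 * out in the comment inside the function. cc_update_aead_mlli_nents() then
 * assigns the SRAM offsets of each sub-table.
 */
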
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /*used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for cipher, remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + areq_ctx->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}
	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* Mlli support -start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

*drvdata
, void *ctx
,
1332 struct scatterlist
*src
, unsigned int nbytes
,
1333 bool do_update
, gfp_t flags
)
1335 struct ahash_req_ctx
*areq_ctx
= (struct ahash_req_ctx
*)ctx
;
1336 struct device
*dev
= drvdata_to_dev(drvdata
);
1337 u8
*curr_buff
= cc_hash_buf(areq_ctx
);
1338 u32
*curr_buff_cnt
= cc_hash_buf_cnt(areq_ctx
);
1339 struct mlli_params
*mlli_params
= &areq_ctx
->mlli_params
;
1340 struct buffer_array sg_data
;
1341 struct buff_mgr_handle
*buff_mgr
= drvdata
->buff_mgr_handle
;
1344 u32 mapped_nents
= 0;
1346 dev_dbg(dev
, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
1347 curr_buff
, *curr_buff_cnt
, nbytes
, src
, areq_ctx
->buff_index
);
1348 /* Init the type of the dma buffer */
1349 areq_ctx
->data_dma_buf_type
= CC_DMA_BUF_NULL
;
1350 mlli_params
->curr_pool
= NULL
;
1351 sg_data
.num_of_buffers
= 0;
1352 areq_ctx
->in_nents
= 0;
1354 if (nbytes
== 0 && *curr_buff_cnt
== 0) {
1359 /*TODO: copy data in case that buffer is enough for operation */
1360 /* map the previous buffer */
1361 if (*curr_buff_cnt
) {
1362 rc
= cc_set_hash_buf(dev
, areq_ctx
, curr_buff
, *curr_buff_cnt
,
1368 if (src
&& nbytes
> 0 && do_update
) {
1369 rc
= cc_map_sg(dev
, src
, nbytes
, DMA_TO_DEVICE
,
1370 &areq_ctx
->in_nents
, LLI_MAX_NUM_OF_DATA_ENTRIES
,
1371 &dummy
, &mapped_nents
);
1373 goto unmap_curr_buff
;
1374 if (src
&& mapped_nents
== 1 &&
1375 areq_ctx
->data_dma_buf_type
== CC_DMA_BUF_NULL
) {
1376 memcpy(areq_ctx
->buff_sg
, src
,
1377 sizeof(struct scatterlist
));
1378 areq_ctx
->buff_sg
->length
= nbytes
;
1379 areq_ctx
->curr_sg
= areq_ctx
->buff_sg
;
1380 areq_ctx
->data_dma_buf_type
= CC_DMA_BUF_DLLI
;
1382 areq_ctx
->data_dma_buf_type
= CC_DMA_BUF_MLLI
;
1387 if (areq_ctx
->data_dma_buf_type
== CC_DMA_BUF_MLLI
) {
1388 mlli_params
->curr_pool
= buff_mgr
->mlli_buffs_pool
;
1389 /* add the src data to the sg_data */
1390 cc_add_sg_entry(dev
, &sg_data
, areq_ctx
->in_nents
, src
, nbytes
,
1391 0, true, &areq_ctx
->mlli_nents
);
1392 rc
= cc_generate_mlli(dev
, &sg_data
, mlli_params
, flags
);
1394 goto fail_unmap_din
;
1396 /* change the buffer index for the unmap function */
1397 areq_ctx
->buff_index
= (areq_ctx
->buff_index
^ 1);
1398 dev_dbg(dev
, "areq_ctx->data_dma_buf_type = %s\n",
1399 cc_dma_buf_type(areq_ctx
->data_dma_buf_type
));
1403 dma_unmap_sg(dev
, src
, areq_ctx
->in_nents
, DMA_TO_DEVICE
);
1407 dma_unmap_sg(dev
, areq_ctx
->buff_sg
, 1, DMA_TO_DEVICE
);
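/*
 * Worked example for the residue split below (illustrative numbers only):
 * with block_size = 64, *curr_buff_cnt = 10 and nbytes = 100, total_in_len
 * is 110, so *next_buff_cnt = 110 & 63 = 46 bytes are copied aside for the
 * next call and update_data_len = 64 bytes are actually hashed now.
 */
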
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents =
			cc_get_sgl_nents(dev, src, nbytes, &dummy);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size*/
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!buff_mgr_handle->mlli_buffs_pool)
		goto error;

	return 0;

error:
	cc_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}