// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"
enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
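/*
 * Note: a struct buffer_array is built up per request. Each call to
 * cc_add_sg_entry()/cc_add_buffer_entry() below records one DMA-mapped
 * SG list or contiguous buffer, and cc_generate_mlli() later renders all
 * recorded entries into a single MLLI table allocated from the MLLI DMA
 * pool.
 */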
static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}
/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = areq_ctx->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}
/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}
	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}
/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* Handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}
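/*
 * Render a DMA-mapped scatterlist into MLLI entries: each SG segment is
 * passed to cc_render_buff_to_mlli(), honouring an initial offset into
 * the first segment and stopping once sgl_data_len bytes were consumed.
 */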
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	int rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
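/*
 * cc_generate_mlli() allocates one MLLI table from the current DMA pool
 * and renders every entry accumulated in sg_data (SG lists and single
 * buffers) into it, updating the per-entry mlli_nents counters that the
 * descriptor setup code later uses for the table length fields.
 */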
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i = 0;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
						  offset, &total_nents,
						  &mlli_p);
		else /* DMA_BUFF_TYPE */
			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
						    tot_len, &total_nents,
						    &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}
static void cc_add_buffer_entry(struct device *dev,
				struct buffer_array *sgl_data,
				dma_addr_t buffer_dma, unsigned int buffer_len,
				bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
		index, &buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}
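/*
 * DMA-map an SG list for a transfer of nbytes. A single-entry list is
 * mapped directly (DLLI case); otherwise the number of entries covering
 * nbytes is computed, checked against max_sg_nents, and the whole list is
 * mapped, returning both the logical and the actually mapped entry counts.
 */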
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	if (sg_is_last(sg)) {
		/* One entry only case - set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else { /* !sg_is_last */
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
		if (*nents > max_sg_nents) {
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		/* In case of mmu the number of mapped nents might
		 * be changed from the original sgl nents
		 */
		*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
		if (*mapped_nents == 0) {
			dev_err(dev, "dma_map_sg() sg buffer failed\n");
			*nents = 0;
			return -ENOMEM;
		}
	}

	return 0;
}
static int cc_set_aead_conf_buf(struct device *dev,
				struct aead_req_ctx *areq_ctx,
				u8 *config_data, struct buffer_array *sg_data,
				unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0)
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	return 0;
}
static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}
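/*
 * Map a symmetric cipher request for DMA: the IV (if any) is mapped as a
 * single buffer, src and dst SG lists are mapped bidirectionally, and if
 * either side is fragmented the request is switched to MLLI mode and an
 * MLLI table is generated from the gathered entries.
 */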
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overriding that caused by cache coherence
		 * problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}
static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}
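/*
 * Map the request IV for DMA. When do_chain is set and the request is
 * plaintext-authenticate-only, the mapped IV is also chained into the
 * assoc MLLI list so it is authenticated together with the assoc data.
 */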
static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;

		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int size_of_assoc = areq_ctx->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case.Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	unsigned int offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle if the sg is not contig. */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = areq_ctx->assoclen;
	struct scatterlist *sgl;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (sg_index <= size_to_skip) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = areq_ctx->assoclen + req->cryptlen;
		size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				authsize : 0;
		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case: dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
							curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
					DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->dst.sram_addr =
						areq_ctx->src.sram_addr +
						areq_ctx->src.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
						drvdata->mlli_sram_addr +
							curr_mlli_size;
				areq_ctx->src.sram_addr =
						areq_ctx->dst.sram_addr +
						areq_ctx->dst.mlli_nents *
						LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}
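/*
 * Top-level AEAD mapping: maps the MAC buffer, the mode-specific blocks
 * (CCM config, GCM hkey/len/IV-inc blocks) and the src/dst SG lists, then
 * chains assoc, IV and data either in a single pass or in the double-pass
 * order documented inline, and finally builds the MLLI table(s) if any
 * part of the request is fragmented.
 */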
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overriding that caused by cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for cipher: remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
				DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + areq_ctx->assoclen;
	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
		size_to_map += authsize;

	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 *   (1) Assoc. data
		 *   (2) Src/Dst SGLs
		 *   Note: IV is contg. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for dst
		 *   (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 *   (1) MLLI table for assoc
		 *   (2) IV entry (chained right after end of assoc)
		 *   (3) MLLI for src
		 *   (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}
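/*
 * Map the data for a hash final/finup operation: any bytes staged in the
 * request's internal buffer are mapped first, then the caller's SG list;
 * a single mapped entry stays DLLI, anything else is rendered into an
 * MLLI table.
 */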
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size */
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return -ENOMEM;
}
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}
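/*
 * Create the buffer manager handle and its DMA pool of MLLI tables; the
 * pool is sized for MAX_NUM_OF_TOTAL_MLLI_ENTRIES entries per table.
 */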
int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);

	buff_mgr_handle = kmalloc(sizeof(*buff_mgr_handle), GFP_KERNEL);
	if (!buff_mgr_handle)
		return -ENOMEM;

	drvdata->buff_mgr_handle = buff_mgr_handle;

	buff_mgr_handle->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!buff_mgr_handle->mlli_buffs_pool)
		goto error;

	return 0;

error:
	cc_buffer_mgr_fini(drvdata);
	return -ENOMEM;
}
int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;

	if (buff_mgr_handle) {
		dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
		kfree(drvdata->buff_mgr_handle);
		drvdata->buff_mgr_handle = NULL;
	}
	return 0;
}