// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/**
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error.
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */

/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: Request structure
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;

out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped sg entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 * Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct nitrox_sglist *sglist;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;
	sgtbl->nr_sgcomp = nr_sgcomp;

	sglist = sgtbl->sglist;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4; j++) {
			sgcomp->len[j] = cpu_to_be16(sglist->len);
			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
			sglist++;
		}
		sgcomp++;
	}

	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->dma = dma;
	sgtbl->len = sz_comp;

	return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create the sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/* create gather list for IV and src entries */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		ret = -EINVAL;
		goto iv_map_err;
	}

	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto dma_map_err;
	}
	sr->in.buf = req->src;

	/* store the mappings */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* roundup map count to align with entries in sg component */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
dma_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}

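/**
 * dma_map_outbufs - DMA map the output sglist (ORH, IV/dst entries and
 *                   completion header) and create the scatter component.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */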
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list for ORH, IV, dst entries and Completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
		ret = -EINVAL;
		goto orh_map_err;
	}

	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
		ret = -EINVAL;
		goto compl_map_err;
	}

	sr->inplace = (req->src == req->dst) ? true : false;
	/* map dst entries only for out of place requests */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}

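/**
 * softreq_map_iobuf - DMA map the input and output buffers of a request.
 * @sr: Request structure
 * @creq: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */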
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

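/* add the request to the command queue backlog and mark it REQ_BACKLOG */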
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

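/*
 * cmdq_full - check whether the command queue can accept one more entry.
 * A slot is reserved by bumping the pending count; if the queue is already
 * full the reservation is dropped and true is returned.
 */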
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
	u64 offset;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	/* get the next write offset */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
	/* copy the instruction */
	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
	memcpy(ent, &sr->instr, cmdq->instr_size);
	/* flush the command queue updates */
	dma_wmb();

	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	spin_unlock_bh(&cmdq->cmdq_lock);
}

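/**
 * post_backlog_cmds - post backlogged requests while ring space is available.
 * @cmdq: Command queue structure
 *
 * Returns 0 if the backlog was drained, or a negative error code if the
 * ring filled up before all backlogged requests could be posted.
 */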
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -EBUSY;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}

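/*
 * nitrox_enqueue_request - post the request to the command queue, or add it
 * to the backlog when the ring is full and the request allows backlogging.
 */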
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
	int ret = -EBUSY;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EAGAIN;

		backlog_list_add(sr, cmdq);
	} else {
		ret = post_backlog_cmds(cmdq);
		if (ret) {
			backlog_list_add(sr, cmdq);
			return ret;
		}
		post_se_instr(sr, cmdq);
		ret = -EINPROGRESS;
	}
	return ret;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: Crypto request passed back to the completion callback
 *
 * Returns 0 on success, or a negative error code.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No conversion for front data,
	 * it goes into the payload.
	 * Put GP Header in front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	/* flush the soft_req changes before posting the cmd */
	wmb();

	ret = nitrox_enqueue_request(sr);
	if (ret == -EAGAIN)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

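/* returns true when a posted request has waited longer than the given timeout */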
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

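/**
 * backlog_qflush_work - work handler to flush the backlogged requests
 * @work: Work structure embedded in the command queue
 */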
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests.
 * @cmdq: Command queue structure
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * @data: per-ring bottom-half data (struct bh_data), passed as unsigned long
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}