// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_common.h"
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
#define REQ_POSTED 3
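/*
 * Software request (softreq) lifecycle: a request starts out as
 * REQ_NOT_POSTED, moves to REQ_BACKLOG if the ring is full and the
 * caller allowed backlogging, and becomes REQ_POSTED once its
 * instruction has been copied into the Packet Input ring and the
 * doorbell has been rung.
 */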
/*
 * Response codes from SE microcode
 * 0x00 - Success
 *      Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *      Invalid Data length if Encryption Data length is
 *      less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *      Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *      DOCSIS support is enabled with other than
 *      AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *      Authentication offset is other than 0 with
 *      Encryption IV source = 0.
 *      Authentication offset is other than 8 (DES)/16 (AES)
 *      with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *      CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *      Invalid flag options in AES-CCM IV.
 */
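/*
 * incr_index() advances a ring index by @count entries and wraps it
 * back to the start of the ring once it reaches @max; post_se_instr()
 * uses it to move cmdq->write_idx within a ring of ndev->qlen entries.
 */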
static inline int incr_index(int index, int count, int max)
{
        if ((index + count) >= max)
                index = index + count - max;
        else
                index += count;

        return index;
}
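/*
 * softreq_unmap_sgbufs() undoes everything softreq_map_iobuf() set up:
 * it unmaps the source and destination scatterlists and the device
 * visible SG component arrays, then frees the component allocations.
 */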
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
        struct nitrox_device *ndev = sr->ndev;
        struct device *dev = DEV(ndev);

        dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg),
                     DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
                         DMA_TO_DEVICE);
        kfree(sr->in.sgcomp);
        sr->in.sg = NULL;
        sr->in.sgmap_cnt = 0;

        dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg),
                     DMA_BIDIRECTIONAL);
        dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
                         DMA_TO_DEVICE);
        kfree(sr->out.sgcomp);
        sr->out.sg = NULL;
        sr->out.sgmap_cnt = 0;
}
static void softreq_destroy(struct nitrox_softreq *sr)
{
        softreq_unmap_sgbufs(sr);
        kfree(sr);
}
/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of dma mapped entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 * Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
                               struct nitrox_sgtable *sgtbl, int map_nents)
{
        struct nitrox_device *ndev = sr->ndev;
        struct nitrox_sgcomp *sgcomp;
        struct scatterlist *sg;
        dma_addr_t dma;
        size_t sz_comp;
        int i, j, nr_sgcomp;

        nr_sgcomp = roundup(map_nents, 4) / 4;

        /* each component holds 4 dma pointers */
        sz_comp = nr_sgcomp * sizeof(*sgcomp);
        sgcomp = kzalloc(sz_comp, sr->gfp);
        if (!sgcomp)
                return -ENOMEM;

        sgtbl->sgcomp = sgcomp;

        sg = sgtbl->sg;
        /* populate device sg component */
        for (i = 0; i < nr_sgcomp; i++) {
                for (j = 0; j < 4 && sg; j++) {
                        sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
                        sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
                        sg = sg_next(sg);
                }
        }
        /* map the device sg component */
        dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
        if (dma_mapping_error(DEV(ndev), dma)) {
                kfree(sgtbl->sgcomp);
                sgtbl->sgcomp = NULL;
                return -ENOMEM;
        }

        sgtbl->sgcomp_dma = dma;
        sgtbl->sgcomp_len = sz_comp;

        return 0;
}
/**
 * dma_map_inbufs - DMA map input sglist and create the sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
                          struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        struct scatterlist *sg;
        int i, nents, ret = 0;

        nents = dma_map_sg(dev, req->src, sg_nents(req->src),
                           DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        for_each_sg(req->src, sg, nents, i)
                sr->in.total_bytes += sg_dma_len(sg);

        sr->in.sg = req->src;
        sr->in.sgmap_cnt = nents;
        ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
        if (ret)
                goto incomp_err;

        return 0;

incomp_err:
        dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
        sr->in.sgmap_cnt = 0;
        return ret;
}
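/*
 * dma_map_outbufs() mirrors dma_map_inbufs() for the destination
 * scatterlist. It does not accumulate total_bytes: only the input
 * length feeds the instruction's total-length (tlen) field.
 */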
static int dma_map_outbufs(struct nitrox_softreq *sr,
                           struct se_crypto_request *req)
{
        struct device *dev = DEV(sr->ndev);
        int nents, ret = 0;

        nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
                           DMA_BIDIRECTIONAL);
        if (!nents)
                return -EINVAL;

        sr->out.sg = req->dst;
        sr->out.sgmap_cnt = nents;
        ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
        if (ret)
                goto outcomp_map_err;

        return 0;

outcomp_map_err:
        dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL);
        sr->out.sgmap_cnt = 0;
        sr->out.sg = NULL;
        return ret;
}
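/*
 * softreq_map_iobuf() maps the input side first and only then the
 * output side; if the second step fails, softreq_unmap_sgbufs() is
 * used to drop whatever mappings were already established.
 */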
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
                                    struct se_crypto_request *creq)
{
        int ret;

        ret = dma_map_inbufs(sr, creq);
        if (ret)
                return ret;

        ret = dma_map_outbufs(sr, creq);
        if (ret)
                softreq_unmap_sgbufs(sr);

        return ret;
}
static inline void backlog_list_add(struct nitrox_softreq *sr,
                                    struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->backlog);

        spin_lock_bh(&cmdq->backlog_qlock);
        list_add_tail(&sr->backlog, &cmdq->backlog_head);
        atomic_inc(&cmdq->backlog_count);
        atomic_set(&sr->status, REQ_BACKLOG);
        spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        INIT_LIST_HEAD(&sr->response);

        spin_lock_bh(&cmdq->resp_qlock);
        list_add_tail(&sr->response, &cmdq->response_head);
        spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
                                     struct nitrox_cmdq *cmdq)
{
        spin_lock_bh(&cmdq->resp_qlock);
        list_del(&sr->response);
        spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
        return list_first_entry_or_null(&cmdq->response_head,
                                        struct nitrox_softreq, response);
}
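/*
 * cmdq_full() doubles as admission control: it optimistically bumps
 * pending_count and backs the increment out again when the ring already
 * holds qlen entries. pending_count is only decremented once a response
 * is consumed in process_response_list().
 */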
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
        if (atomic_inc_return(&cmdq->pending_count) > qlen) {
                atomic_dec(&cmdq->pending_count);
                /* sync with other cpus */
                smp_mb__after_atomic();
                return true;
        }
        /* sync with other cpus */
        smp_mb__after_atomic();
        return false;
}
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue structure
 *
 * The caller is expected to have checked cmdq_full() first;
 * this helper assumes there is space in the ring.
 */
static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = sr->ndev;
        int idx;
        u8 *ent;

        spin_lock_bh(&cmdq->cmd_qlock);

        idx = cmdq->write_idx;
        /* copy the instruction */
        ent = cmdq->base + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);

        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);
        sr->tstamp = jiffies;
        /* flush the command queue updates */
        dma_wmb();

        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);

        cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

        spin_unlock_bh(&cmdq->cmd_qlock);

        /* increment the posted command count */
        atomic64_inc(&ndev->stats.posted);
}
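/*
 * post_backlog_cmds() drains the backlog built up by backlog_list_add():
 * entries are re-posted in FIFO order until cmdq_full() reports that the
 * ring is out of space again.
 */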
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;

        if (!atomic_read(&cmdq->backlog_count))
                return 0;

        spin_lock_bh(&cmdq->backlog_qlock);

        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                        ret = -ENOSPC;
                        break;
                }
                /* delete from backlog list */
                list_del(&sr->backlog);
                atomic_dec(&cmdq->backlog_count);
                /* sync with other cpus */
                smp_mb__after_atomic();

                /* post the command */
                post_se_instr(sr, cmdq);
        }
        spin_unlock_bh(&cmdq->backlog_qlock);

        return ret;
}
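/*
 * nitrox_enqueue_request() first gives the backlog a chance to drain,
 * then either posts the new request, backlogs it (when the caller set
 * CRYPTO_TFM_REQ_MAY_BACKLOG), or drops it and bumps the drop counter.
 */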
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;

        /* try to post backlog requests */
        post_backlog_cmds(cmdq);

        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        /* increment drop count */
                        atomic64_inc(&ndev->stats.dropped);
                        return -ENOSPC;
                }
                /* add to backlog list */
                backlog_list_add(sr, cmdq);
                return -EBUSY;
        }
        post_se_instr(sr, cmdq);

        return -EINPROGRESS;
}
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Completion callback arguments
 *
 * Returns -EINPROGRESS if the request was posted, -EBUSY if it was
 * backlogged, or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
                              struct se_crypto_request *req,
                              completion_t callback,
                              void *cb_arg)
{
        struct nitrox_softreq *sr;
        dma_addr_t ctx_handle = 0;
        int qno, ret = 0;

        if (!nitrox_ready(ndev))
                return -ENODEV;

        sr = kzalloc(sizeof(*sr), req->gfp);
        if (!sr)
                return -ENOMEM;

        sr->ndev = ndev;
        sr->flags = req->flags;
        sr->gfp = req->gfp;
        sr->callback = callback;
        sr->cb_arg = cb_arg;

        atomic_set(&sr->status, REQ_NOT_POSTED);

        sr->resp.orh = req->orh;
        sr->resp.completion = req->comp;

        ret = softreq_map_iobuf(sr, req);
        if (ret) {
                kfree(sr);
                return ret;
        }

        /* get the context handle */
        if (req->ctx_handle) {
                struct ctx_hdr *hdr;
                u8 *ctx_ptr;

                ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
                hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
                ctx_handle = hdr->ctx_dma;
        }

        /* select the queue */
        qno = smp_processor_id() % ndev->nr_queues;

        sr->cmdq = &ndev->pkt_inq[qno];
        /*
         * 64-Byte Instruction Format
         *
         *  ----------------------
         *  |      DPTR0         | 8 bytes
         *  ----------------------
         *  |  PKT_IN_INSTR_HDR  | 8 bytes
         *  ----------------------
         *  |    PKT_IN_HDR      | 16 bytes
         *  ----------------------
         *  |     SLC_INFO       | 16 bytes
         *  ----------------------
         *  |   Front data       | 16 bytes
         *  ----------------------
         */
        /* fill the packet instruction */
        /* word 0 */
        sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

        /* word 1 */
        sr->instr.ih.value = 0;
        sr->instr.ih.s.g = 1;
        sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
        sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
        sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
        sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
        sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value);

        /* word 2 */
        sr->instr.irh.value[0] = 0;
        sr->instr.irh.s.uddl = MIN_UDD_LEN;
        /* context length in 64-bit words */
        sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
        /* offset from solicit base port 256 */
        sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
        sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
        sr->instr.irh.s.arg = req->ctrl.s.arg;
        sr->instr.irh.s.opcode = req->opcode;
        sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]);

        /* word 3 */
        sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

        /* word 4 */
        sr->instr.slc.value[0] = 0;
        sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
        sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]);

        /* word 5 */
        sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

        /*
         * No conversion for front data,
         * it goes into payload.
         * Put GP Header in front data.
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;

        ret = nitrox_enqueue_request(sr);
        if (ret == -ENOSPC)
                goto send_fail;

        return ret;

send_fail:
        softreq_destroy(sr);
        return ret;
}
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
        return time_after_eq(jiffies, (tstamp + timeout));
}
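/*
 * backlog_qflush_work() runs from the workqueue scheduled by
 * pkt_slc_resp_tasklet() whenever backlogged requests are still
 * pending, so the backlog is retried outside interrupt context.
 */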
void backlog_qflush_work(struct work_struct *work)
{
        struct nitrox_cmdq *cmdq;

        cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
        post_backlog_cmds(cmdq);
}
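/*
 * sr_completed() looks at the ORH first: a value other than PENDING_SIG
 * with a non-zero low byte means the request finished with an error.
 * Otherwise it polls the completion word for roughly a millisecond
 * before giving up for this pass.
 */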
static bool sr_completed(struct nitrox_softreq *sr)
{
        u64 orh = READ_ONCE(*sr->resp.orh);
        unsigned long timeout = jiffies + msecs_to_jiffies(1);

        if ((orh != PENDING_SIG) && (orh & 0xff))
                return true;

        while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
                if (time_after(jiffies, timeout)) {
                        pr_err("comp not done\n");
                        return false;
                }
        }

        return true;
}
/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
        struct nitrox_device *ndev = cmdq->ndev;
        struct nitrox_softreq *sr;
        int req_completed = 0, err = 0, budget;
        completion_t callback;
        void *cb_arg;

        /* check all pending requests */
        budget = atomic_read(&cmdq->pending_count);

        while (req_completed < budget) {
                sr = get_first_response_entry(cmdq);
                if (!sr)
                        break;

                if (atomic_read(&sr->status) != REQ_POSTED)
                        break;

                /* check orh and completion bytes updates */
                if (!sr_completed(sr)) {
                        /* request not completed, check for timeout */
                        if (!cmd_timeout(sr->tstamp, ndev->timeout))
                                break;
                        dev_err_ratelimited(DEV(ndev),
                                            "Request timeout, orh 0x%016llx\n",
                                            READ_ONCE(*sr->resp.orh));
                }
                atomic_dec(&cmdq->pending_count);
                atomic64_inc(&ndev->stats.completed);
                /* sync with other cpus */
                smp_mb__after_atomic();
                /* remove from response list */
                response_list_del(sr, cmdq);

                /* ORH error code */
                err = READ_ONCE(*sr->resp.orh) & 0xff;
                callback = sr->callback;
                cb_arg = sr->cb_arg;
                softreq_destroy(sr);
                if (callback)
                        callback(cb_arg, err);

                req_completed++;
        }
}
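/*
 * The budget above caps the work done per invocation at the number of
 * requests that were pending when the pass started, so a steady stream
 * of completions cannot keep the response tasklet looping forever.
 */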
/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument (pointer to the queue vector)
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
        struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
        struct nitrox_cmdq *cmdq = qvec->cmdq;
        union nps_pkt_slc_cnts slc_cnts;

        /* read completion count */
        slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
        /* resend the interrupt if more work to do */
        slc_cnts.s.resend = 1;

        process_response_list(cmdq);

        /*
         * clear the interrupt with resend bit enabled,
         * MSI-X interrupt generates if Completion count > Threshold
         */
        writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

        if (atomic_read(&cmdq->backlog_count))
                schedule_work(&cmdq->backlog_qflush);
}