// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_common.h"
#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256

/* software request states */
#define REQ_NOT_POSTED 1
#define REQ_BACKLOG 2
#define REQ_POSTED 3
/*
 * Response codes from SE microcode
 * 0x00 - Success
 *	Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *	Invalid Data length if Encryption Data length is
 *	less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *	Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *	DOCSIS support is enabled with other than
 *	AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *	Authentication offset is other than 0 with
 *	Encryption IV source = 0.
 *	Authentication offset is other than 8 (DES)/16 (AES)
 *	with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *	CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *	Invalid flag options in AES-CCM IV.
 */
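/*
 * A minimal sketch (not used by this driver) of how the status byte in the
 * ORH could be folded into an errno on the caller side; the helper name and
 * the exact errno mapping are illustrative assumptions only.
 */
static inline int nitrox_orh_status_to_errno(u64 orh)
{
	switch (orh & 0xff) {
	case 0x00:	/* completion with no error */
		return 0;
	case 0x43:	/* ERR_GC_DATA_LEN_INVALID */
	case 0x45:	/* ERR_GC_CTX_LEN_INVALID */
	case 0x4f:	/* ERR_GC_DOCSIS_CIPHER_INVALID */
	case 0x50:	/* ERR_GC_DOCSIS_OFFSET_INVALID */
	case 0x51:	/* ERR_GC_CRC32_INVALID_SELECTION */
	case 0x52:	/* ERR_GC_AES_CCM_FLAG_INVALID */
		return -EINVAL;
	default:	/* any other microcode error */
		return -EIO;
	}
}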
static inline int incr_index(int index, int count, int max)
{
	if ((index + count) >= max)
		index = index + count - max;
	else
		index += count;

	return index;
}
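/*
 * Worked example for the wrap-around above: with a ring of ndev->qlen
 * entries, incr_index(qlen - 1, 1, qlen) == 0 (wrap back to the first slot),
 * while incr_index(5, 1, qlen) == 6 for any qlen > 6.
 */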
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);

	dma_unmap_sg(dev, sr->in.sg, sr->in.sgmap_cnt, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->in.sgcomp);
	sr->in.sg = NULL;
	sr->in.sgmap_cnt = 0;

	dma_unmap_sg(dev, sr->out.sg, sr->out.sgmap_cnt,
		     DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len,
			 DMA_TO_DEVICE);
	kfree(sr->out.sgcomp);
	sr->out.sg = NULL;
	sr->out.sgmap_cnt = 0;
}
static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}
/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of dma mapped entries
 *
 * Component structure (a worked example follows the function below):
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 * Returns 0 if success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	unsigned int nr_sgcomp;
	struct scatterlist *sg;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;

	sg = sgtbl->sg;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4 && sg; j++) {
			sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg));
			sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg));
			sg = sg_next(sg);
		}
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->sgcomp_dma = dma;
	sgtbl->sgcomp_len = sz_comp;

	return 0;
}
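/*
 * Worked example (illustrative): a scatterlist that DMA-maps to six segments
 * (map_nents == 6) needs roundup(6, 4) / 4 == 2 components. Component 0
 * carries LEN0-LEN3/PTR0-PTR3 for segments 0-3, component 1 carries segments
 * 4-5, and its last two length/pointer slots stay zero because the table is
 * kzalloc'ed above. The gather/scatter sizes programmed into the instruction
 * header later (gsz/ssz) are the mapped entry counts, so the zero slots are
 * only padding.
 */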
/**
 * dma_map_inbufs - DMA map input sglist and create sglist component
 *		    for N5 device
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	int i, nents, ret = 0;

	nents = dma_map_sg(dev, req->src, sg_nents(req->src),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	for_each_sg(req->src, sg, nents, i)
		sr->in.total_bytes += sg_dma_len(sg);

	sr->in.sg = req->src;
	sr->in.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
	sr->in.sgmap_cnt = 0;
	return ret;
}
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	int nents, ret = 0;

	nents = dma_map_sg(dev, req->dst, sg_nents(req->dst),
			   DMA_BIDIRECTIONAL);
	if (!nents)
		return -EINVAL;

	sr->out.sg = req->dst;
	sr->out.sgmap_cnt = nents;
	ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	dma_unmap_sg(dev, req->dst, nents, DMA_BIDIRECTIONAL);
	sr->out.sgmap_cnt = 0;
	sr->out.sg = NULL;
	return ret;
}
static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}
static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->resp_qlock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->resp_qlock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		/* no slot available, undo the optimistic increment */
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	/* sync with other cpus */
	smp_mb__after_atomic();
	return false;
}
/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue to post the instruction on
 *
 * The caller must already have reserved a ring slot via cmdq_full();
 * posting itself cannot fail.
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	int idx;
	u8 *ent;

	spin_lock_bh(&cmdq->cmd_qlock);

	idx = cmdq->write_idx;
	/* copy the instruction */
	ent = cmdq->base + (idx * cmdq->instr_size);
	memcpy(ent, &sr->instr, cmdq->instr_size);

	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);
	sr->tstamp = jiffies;
	/* flush the command queue updates */
	dma_wmb();

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);

	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);

	spin_unlock_bh(&cmdq->cmd_qlock);

	/* increment the posted command count */
	atomic64_inc(&ndev->stats.posted);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	if (!atomic_read(&cmdq->backlog_count))
		return 0;

	spin_lock_bh(&cmdq->backlog_qlock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -ENOSPC;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		/* post the command */
		post_se_instr(sr, cmdq);
	}
	spin_unlock_bh(&cmdq->backlog_qlock);

	return ret;
}
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;

	/* try to post backlog requests */
	post_backlog_cmds(cmdq);

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			/* increment drop count */
			atomic64_inc(&ndev->stats.dropped);
			return -ENOSPC;
		}
		/* add to backlog list */
		backlog_list_add(sr, cmdq);
		return -EINPROGRESS;
	}
	post_se_instr(sr, cmdq);

	return -EINPROGRESS;
}
/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @cb_arg: Completion callback argument
 *
 * Returns -EINPROGRESS if the request was queued or backlogged,
 * or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      void *cb_arg)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->cb_arg = cb_arg;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	sr->resp.orh = req->orh;
	sr->resp.completion = req->comp;

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}
	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_inq[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |     SLC_INFO       | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */
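	/*
	 * Size bookkeeping, as a worked example (illustrative, assuming the
	 * 8-byte struct gphdr placed in front data below): the fixed header
	 * size is fsz = FDATA_SIZE + sizeof(struct gphdr) = 32 + 8 = 40
	 * bytes, and tlen adds the DMA-mapped input length on top, so a
	 * 256-byte input gives tlen = 296.
	 */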
	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.sgmap_cnt;
	sr->instr.ih.s.ssz = sr->out.sgmap_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.sgmap_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma);

	/*
	 * No conversion for front data,
	 * it goes into the payload.
	 * Put the GP header in front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	ret = nitrox_enqueue_request(sr);
	if (ret == -ENOSPC)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}
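/*
 * A minimal sketch (not part of this driver) of how a submitter is expected
 * to call nitrox_process_se_request() and interpret its return value; the
 * wrapper name nitrox_example_submit() and its arguments are illustrative
 * assumptions only.
 */
static inline int nitrox_example_submit(struct nitrox_device *ndev,
					struct se_crypto_request *req,
					completion_t done, void *arg)
{
	int ret = nitrox_process_se_request(ndev, req, done, arg);

	/*
	 * -EINPROGRESS: accepted - posted to the ring now, or backlogged if
	 * the submitter set CRYPTO_TFM_REQ_MAY_BACKLOG; @done runs later.
	 * -ENOSPC: ring full and backlogging not allowed; request dropped.
	 * Other negative values: setup failures (device not ready, OOM,
	 * DMA mapping errors).
	 */
	if (ret != -EINPROGRESS && ret != -ENOSPC)
		pr_debug("nitrox: SE submit failed: %d\n", ret);

	return ret;
}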
static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}
static bool sr_completed(struct nitrox_softreq *sr)
{
	u64 orh = READ_ONCE(*sr->resp.orh);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);

	if ((orh != PENDING_SIG) && (orh & 0xff))
		return true;

	while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) {
		if (time_after(jiffies, timeout)) {
			pr_err("comp not done\n");
			return false;
		}
	}

	return true;
}
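/*
 * Note on the completion protocol above (a summary kept here for
 * illustration): the submitter pre-fills both the ORH and the completion
 * word with PENDING_SIG and the SE core overwrites them when it finishes.
 * A non-PENDING ORH with a non-zero status byte means the request already
 * failed, so it is reported complete immediately; otherwise the completion
 * word is given roughly one millisecond to land before the request is
 * treated as still outstanding.
 */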
/**
 * process_response_list - process completed requests
 * @cmdq: Command queue to operate on
 *
 * Walks the response list of @cmdq and completes up to the number of
 * requests pending at entry.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	int req_completed = 0, err = 0, budget;
	completion_t callback;
	void *cb_arg;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/* check orh and completion bytes updates */
		if (!sr_completed(sr)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(*sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		atomic64_inc(&ndev->stats.completed);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);
		/* ORH error code */
		err = READ_ONCE(*sr->resp.orh) & 0xff;
		callback = sr->callback;
		cb_arg = sr->cb_arg;
		softreq_destroy(sr);
		if (callback)
			callback(cb_arg, err);

		req_completed++;
	}
}
/**
 * pkt_slc_resp_tasklet - post processing of SE responses
 * @data: tasklet argument - the queue vector (struct nitrox_q_vector)
 */
void pkt_slc_resp_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = qvec->cmdq;
	union nps_pkt_slc_cnts slc_cnts;

	/* read completion count */
	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with resend bit enabled,
	 * MSI-X interrupt generates if Completion count > Threshold
	 */
	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}