// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include "cptvf_algs.h"
#include "request_manager.h"
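
/*
 * Request manager for the Cavium CPT virtual function (VF): builds
 * scatter/gather lists and CPT instructions for incoming crypto
 * requests, submits them to the VF command queue, and post-processes
 * completions from the per-VF pending queue.
 */
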
/**
 * get_free_pending_entry - get free entry from pending queue
 * @q: pending queue
 * @qlen: queue length
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
						    int qlen)
{
	struct pending_entry *ent = NULL;

	ent = &q->head[q->rear];
	if (unlikely(ent->busy)) {
		ent = NULL;
		goto no_free_entry;
	}

	q->rear++;
	if (unlikely(q->rear == qlen))
		q->rear = 0;

no_free_entry:
	return ent;
}
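
/**
 * pending_queue_inc_front - advance the front index of a pending queue
 * @pqinfo: pending queue info block
 * @qno: queue number
 *
 * The pending queue is used as a ring: rear advances on submission in
 * get_free_pending_entry() and front advances here on completion,
 * wrapping back to zero at pqinfo->qlen.
 */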
static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
					   int qno)
{
	struct pending_queue *queue = &pqinfo->queue[qno];

	queue->front++;
	if (unlikely(queue->front == pqinfo->qlen))
		queue->front = 0;
}
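
/**
 * setup_sgio_components - DMA-map buffers and pack them into SG components
 * @cptvf: CPT VF device
 * @list: array of buffer pointers to map
 * @buf_count: number of buffers in @list
 * @buffer: destination for the packed components
 *
 * Each struct sglist_component describes up to four buffers as
 * big-endian lengths followed by big-endian DMA addresses. On a mapping
 * failure, every buffer mapped so far is unmapped again.
 */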
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
				 int buf_count, u8 *buffer)
{
	int ret = 0, i, j;
	int components;
	struct sglist_component *sg_ptr = NULL;
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(!list)) {
		dev_err(&pdev->dev, "Input List pointer is NULL\n");
		return -EFAULT;
	}

	for (i = 0; i < buf_count; i++) {
		if (likely(list[i].vptr)) {
			list[i].dma_addr = dma_map_single(&pdev->dev,
							  list[i].vptr,
							  list[i].size,
							  DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(&pdev->dev,
						       list[i].dma_addr))) {
				dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
					i);
				ret = -EIO;
				goto sg_cleanup;
			}
		}
	}

	components = buf_count / 4;
	sg_ptr = (struct sglist_component *)buffer;
	for (i = 0; i < components; i++) {
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
		sg_ptr++;
	}

	/* Fill in the trailing, partially used component */
	components = buf_count % 4;

	switch (components) {
	case 3:
		sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
		sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
		fallthrough;
	case 2:
		sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
		sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
		fallthrough;
	case 1:
		sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
		sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
		break;
	default:
		break;
	}

	return ret;

sg_cleanup:
	/* Unmap everything mapped so far; use index j, not i */
	for (j = 0; j < i; j++) {
		if (list[j].dma_addr) {
			dma_unmap_single(&pdev->dev, list[j].dma_addr,
					 list[j].size, DMA_BIDIRECTIONAL);
		}

		list[j].dma_addr = 0;
	}

	return ret;
}
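
/*
 * setup_sgio_list() assembles the hardware input (DPTR) buffer: an
 * 8-byte scatter/gather header followed by the gather and scatter
 * component arrays:
 *
 *	__be16 word 0:	scatter (output) buffer count
 *	__be16 word 1:	gather (input) buffer count
 *	__be16 words 2-3: reserved, zero
 *	byte 8 onward:	gather components, then scatter components
 *
 * The RPTR buffer is a COMPLETION_CODE_SIZE scratch area primed with
 * ~COMPLETION_CODE_INIT so completion can also be observed through
 * info->alternate_caddr.
 */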
static inline int setup_sgio_list(struct cpt_vf *cptvf,
				  struct cpt_info_buffer *info,
				  struct cpt_request_info *req)
{
	u16 g_sz_bytes = 0, s_sz_bytes = 0;
	int ret = 0;
	struct pci_dev *pdev = cptvf->pdev;

	if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
		dev_err(&pdev->dev, "Request SG components are higher than supported\n");
		ret = -EINVAL;
		goto scatter_gather_clean;
	}

	/* Setup gather (input) components */
	g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
	info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->gather_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->in,
				    req->incnt,
				    info->gather_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup gather list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Setup scatter (output) components */
	s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
	info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->scatter_components) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	ret = setup_sgio_components(cptvf, req->out,
				    req->outcnt,
				    info->scatter_components);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup scatter list\n");
		ret = -EFAULT;
		goto scatter_gather_clean;
	}

	/* Create and initialize DPTR */
	info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
	info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->in_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt);
	((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt);
	((__be16 *)info->in_buffer)[2] = 0;
	((__be16 *)info->in_buffer)[3] = 0;

	memcpy(&info->in_buffer[8], info->gather_components,
	       g_sz_bytes);
	memcpy(&info->in_buffer[8 + g_sz_bytes],
	       info->scatter_components, s_sz_bytes);

	info->dptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->in_buffer,
					  info->dlen,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
		dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	/* Create and initialize RPTR */
	info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!info->out_buffer) {
		ret = -ENOMEM;
		goto scatter_gather_clean;
	}

	*((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
	info->alternate_caddr = (u64 *)info->out_buffer;
	info->rptr_baddr = dma_map_single(&pdev->dev,
					  (void *)info->out_buffer,
					  COMPLETION_CODE_SIZE,
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
		dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
			COMPLETION_CODE_SIZE);
		ret = -EIO;
		goto scatter_gather_clean;
	}

	return 0;

scatter_gather_clean:
	return ret;
}
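
/**
 * send_cpt_command - copy an instruction into the command queue and ring
 * the doorbell
 * @cptvf: CPT VF device
 * @cmd: instruction to submit
 * @qno: command queue number
 *
 * The command queue is a chain of DMA chunks; when the current chunk is
 * full (queue->qhead->size / 64 slots, i.e. 64-byte instructions), the
 * head moves on to the next chunk. The write barrier ensures the
 * instruction is visible in memory before the doorbell write.
 */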
static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
			    u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct command_qinfo *qinfo = NULL;
	struct command_queue *queue;
	struct command_chunk *chunk;
	u8 *ent;
	int ret = 0;

	if (unlikely(qno >= cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
			qno, cptvf->nr_queues);
		return -EINVAL;
	}

	qinfo = &cptvf->cqinfo;
	queue = &qinfo->queue[qno];
	/* lock command queue */
	spin_lock(&queue->lock);
	ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
	memcpy(ent, (void *)cmd, qinfo->cmd_size);

	if (++queue->idx >= queue->qhead->size / 64) {
		hlist_for_each_entry(chunk, &queue->chead, nextchunk) {
			if (chunk == queue->qhead) {
				continue;
			} else {
				queue->qhead = chunk;
				break;
			}
		}
		queue->idx = 0;
	}
	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();
	cptvf_write_vq_doorbell(cptvf, 1);
	/* unlock command queue */
	spin_unlock(&queue->lock);

	return ret;
}
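
/**
 * do_request_cleanup - unmap and free everything attached to a request
 * @cptvf: CPT VF device
 * @info: per-request bookkeeping buffer
 *
 * Unmaps the DPTR, RPTR and completion DMA mappings plus any mapped
 * input/output buffers, then releases the allocations with
 * kfree_sensitive() since they may carry key material.
 */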
static void do_request_cleanup(struct cpt_vf *cptvf,
			       struct cpt_info_buffer *info)
{
	int i;
	struct pci_dev *pdev = cptvf->pdev;
	struct cpt_request_info *req;

	if (info->dptr_baddr)
		dma_unmap_single(&pdev->dev, info->dptr_baddr,
				 info->dlen, DMA_BIDIRECTIONAL);

	if (info->rptr_baddr)
		dma_unmap_single(&pdev->dev, info->rptr_baddr,
				 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

	if (info->comp_baddr)
		dma_unmap_single(&pdev->dev, info->comp_baddr,
				 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

	if (info->req) {
		req = info->req;
		for (i = 0; i < req->outcnt; i++) {
			if (req->out[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->out[i].dma_addr,
						 req->out[i].size,
						 DMA_BIDIRECTIONAL);
		}

		for (i = 0; i < req->incnt; i++) {
			if (req->in[i].dma_addr)
				dma_unmap_single(&pdev->dev,
						 req->in[i].dma_addr,
						 req->in[i].size,
						 DMA_BIDIRECTIONAL);
		}
	}

	kfree_sensitive(info->scatter_components);
	kfree_sensitive(info->gather_components);
	kfree_sensitive(info->out_buffer);
	kfree_sensitive(info->in_buffer);
	kfree_sensitive((void *)info->completion_addr);
	kfree_sensitive(info);
}
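
/* Thin wrapper: validate info and release the request's resources. */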
static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!info) {
		dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
		return;
	}

	do_request_cleanup(cptvf, info);
}
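
/**
 * process_pending_queue - drain completed entries from a pending queue
 * @cptvf: CPT VF device
 * @pqinfo: pending queue info block
 * @qno: queue number
 *
 * Walks entries from the front for as long as they are busy. Faults and
 * software errors are logged and cleaned up; entries still showing
 * COMPLETION_CODE_INIT are checked for timeout, with a bounded number of
 * grace periods granted through info->extra_time; completed entries are
 * cleaned up and their callback invoked with the completion code.
 */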
static inline void process_pending_queue(struct cpt_vf *cptvf,
					 struct pending_qinfo *pqinfo,
					 int qno)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct pending_queue *pqueue = &pqinfo->queue[qno];
	struct pending_entry *pentry = NULL;
	struct cpt_info_buffer *info = NULL;
	union cpt_res_s *status = NULL;
	unsigned char ccode;

	while (1) {
		spin_lock_bh(&pqueue->lock);
		pentry = &pqueue->head[pqueue->front];
		if (unlikely(!pentry->busy)) {
			spin_unlock_bh(&pqueue->lock);
			break;
		}

		info = (struct cpt_info_buffer *)pentry->post_arg;
		if (unlikely(!info)) {
			dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
			pending_queue_inc_front(pqinfo, qno);
			spin_unlock_bh(&pqueue->lock);
			continue;
		}

		status = (union cpt_res_s *)pentry->completion_addr;
		ccode = status->s.compcode;
		if ((status->s.compcode == CPT_COMP_E_FAULT) ||
		    (status->s.compcode == CPT_COMP_E_SWERR)) {
			dev_err(&pdev->dev, "Request failed with %s\n",
				(status->s.compcode == CPT_COMP_E_FAULT) ?
				"DMA Fault" : "Software error");
			pentry->completion_addr = NULL;
			pentry->busy = false;
			atomic64_dec((&pqueue->pending_count));
			pentry->post_arg = NULL;
			pending_queue_inc_front(pqinfo, qno);
			do_request_cleanup(cptvf, info);
			spin_unlock_bh(&pqueue->lock);
			break;
		} else if (status->s.compcode == COMPLETION_CODE_INIT) {
			/* check for timeout */
			if (time_after_eq(jiffies,
					  (info->time_in +
					  (CPT_COMMAND_TIMEOUT * HZ)))) {
				dev_err(&pdev->dev, "Request timed out");
				pentry->completion_addr = NULL;
				pentry->busy = false;
				atomic64_dec((&pqueue->pending_count));
				pentry->post_arg = NULL;
				pending_queue_inc_front(pqinfo, qno);
				do_request_cleanup(cptvf, info);
				spin_unlock_bh(&pqueue->lock);
				break;
			} else if ((*info->alternate_caddr ==
				   (~COMPLETION_CODE_INIT)) &&
				   (info->extra_time < TIME_IN_RESET_COUNT)) {
				info->time_in = jiffies;
				info->extra_time++;
				spin_unlock_bh(&pqueue->lock);
				break;
			}
		}

		pentry->completion_addr = NULL;
		pentry->busy = false;
		pentry->post_arg = NULL;
		atomic64_dec((&pqueue->pending_count));
		pending_queue_inc_front(pqinfo, qno);
		spin_unlock_bh(&pqueue->lock);

		do_post_process(info->cptvf, info);
		/*
		 * Calling callback after we find
		 * that the request has been serviced
		 */
		pentry->callback(ccode, pentry->callback_arg);
	}
}
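
/**
 * process_request - build and submit one CPT instruction
 * @cptvf: CPT VF device
 * @req: request describing the command words and in/out buffers
 *
 * Sets up the scatter/gather lists, allocates and maps the completion
 * area, fills the VQ command (opcode/param1/param2/dlen plus DPTR and
 * RPTR), grabs a pending-queue entry (draining the queue once if none
 * is free) and sends the CPT_INST_S to the hardware. All resources are
 * torn down through do_request_cleanup() on failure.
 */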
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
	int ret = 0, clear = 0, queue = 0;
	struct cpt_info_buffer *info = NULL;
	struct cptvf_request *cpt_req = NULL;
	union ctrl_info *ctrl = NULL;
	union cpt_res_s *result = NULL;
	struct pending_entry *pentry = NULL;
	struct pending_queue *pqueue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	u8 group = 0;
	struct cpt_vq_command vq_cmd;
	union cpt_inst_s cptinst;

	info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info)) {
		dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
		return -ENOMEM;
	}

	cpt_req = (struct cptvf_request *)&req->req;
	ctrl = (union ctrl_info *)&req->ctrl;

	info->cptvf = cptvf;
	group = ctrl->s.grp;
	ret = setup_sgio_list(cptvf, info, req);
	if (ret) {
		dev_err(&pdev->dev, "Setting up SG list failed");
		goto request_cleanup;
	}

	cpt_req->dlen = info->dlen;
	/*
	 * Get buffer for union cpt_res_s response
	 * structure and its physical address
	 */
	info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!info->completion_addr)) {
		dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
		ret = -ENOMEM;
		goto request_cleanup;
	}

	result = (union cpt_res_s *)info->completion_addr;
	result->s.compcode = COMPLETION_CODE_INIT;
	info->comp_baddr = dma_map_single(&pdev->dev,
					  (void *)info->completion_addr,
					  sizeof(union cpt_res_s),
					  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
		dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
			sizeof(union cpt_res_s));
		ret = -EFAULT;
		goto request_cleanup;
	}

	/* Fill the VQ command */
	vq_cmd.cmd.u64 = 0;
	vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
	vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
	vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
	vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

	vq_cmd.dptr = info->dptr_baddr;
	vq_cmd.rptr = info->rptr_baddr;
	vq_cmd.cptr.u64 = 0;
	vq_cmd.cptr.s.grp = group;
	/* Get Pending Entry to submit command */
	/* Always queue 0, because 1 queue per VF */
	pqueue = &cptvf->pqinfo.queue[queue];

	if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
		dev_err(&pdev->dev, "pending threshold reached\n");
		process_pending_queue(cptvf, &cptvf->pqinfo, queue);
	}

get_pending_entry:
	spin_lock_bh(&pqueue->lock);
	pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
	if (unlikely(!pentry)) {
		spin_unlock_bh(&pqueue->lock);
		if (clear == 0) {
			process_pending_queue(cptvf, &cptvf->pqinfo, queue);
			clear = 1;
			goto get_pending_entry;
		}
		dev_err(&pdev->dev, "Get free entry failed\n");
		dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
			queue, pqueue->rear, pqueue->front);
		ret = -EFAULT;
		goto request_cleanup;
	}

	pentry->completion_addr = info->completion_addr;
	pentry->post_arg = (void *)info;
	pentry->callback = req->callback;
	pentry->callback_arg = req->callback_arg;
	info->pentry = pentry;
	pentry->busy = true;
	atomic64_inc(&pqueue->pending_count);

	/* Send CPT command */
	info->pentry = pentry;
	info->time_in = jiffies;
	info->req = req;

	/* Create the CPT_INST_S type command for HW interpretation */
	cptinst.s.doneint = true;
	cptinst.s.res_addr = (u64)info->comp_baddr;
	cptinst.s.tag = 0;
	cptinst.s.grp = 0;
	cptinst.s.wq_ptr = 0;
	cptinst.s.ei0 = vq_cmd.cmd.u64;
	cptinst.s.ei1 = vq_cmd.dptr;
	cptinst.s.ei2 = vq_cmd.rptr;
	cptinst.s.ei3 = vq_cmd.cptr.u64;

	ret = send_cpt_command(cptvf, &cptinst, queue);
	spin_unlock_bh(&pqueue->lock);
	if (unlikely(ret)) {
		dev_err(&pdev->dev, "Send command failed for AE\n");
		ret = -EFAULT;
		goto request_cleanup;
	}

	return 0;

request_cleanup:
	dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
	do_request_cleanup(cptvf, info);

	return ret;
}
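
/**
 * vq_post_process - drain completions for one pending queue
 * @cptvf: CPT VF device
 * @qno: pending queue number
 */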
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (unlikely(qno > cptvf->nr_queues)) {
		dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
			qno);
		return;
	}

	process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}
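
/**
 * cptvf_do_request - entry point for submitting a request to a CPT VF
 * @vfdev: opaque CPT VF device pointer
 * @req: request to submit
 *
 * Rejects the request if the device is not ready or if the request type
 * (SE vs AE) does not match the VF type.
 */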
int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
	struct pci_dev *pdev = cptvf->pdev;

	if (!cpt_device_ready(cptvf)) {
		dev_err(&pdev->dev, "CPT Device is not ready");
		return -ENODEV;
	}

	if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
			cptvf->vfid);
		return -EINVAL;
	} else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
		dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
			cptvf->vfid);
		return -EINVAL;
	}

	return process_request(cptvf, req);
}
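
/*
 * A minimal, hypothetical submission sketch. Field names are taken from
 * the code above; the opcode value, buffers and the exact callback
 * signature are illustrative assumptions, not a verbatim caller from
 * this driver:
 *
 *	static void my_done(int ccode, void *arg)	// hypothetical
 *	{
 *		complete((struct completion *)arg);	// wake the waiter
 *	}
 *
 *	struct cpt_request_info req = {};
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	req.ctrl.s.se_req = 1;			// SE-type request
 *	req.req.opcode.flags = MY_OPCODE;	// hypothetical opcode
 *	req.req.param1 = 0;
 *	req.req.param2 = 0;
 *	req.incnt = 1;
 *	req.in[0].vptr = inbuf;			// kernel buffer to map
 *	req.in[0].size = inlen;
 *	req.outcnt = 1;
 *	req.out[0].vptr = outbuf;
 *	req.out[0].size = outlen;
 *	req.may_sleep = true;
 *	req.callback = my_done;
 *	req.callback_arg = &done;
 *
 *	if (!cptvf_do_request(vfdev, &req))
 *		wait_for_completion(&done);
 */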