// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include "request_manager.h"
/**
 * get_free_pending_entry - get free entry from pending queue
 * @q: pending queue
 * @qlen: queue length
 */
static struct pending_entry *get_free_pending_entry(struct pending_queue *q,
                                                    int qlen)
{
        struct pending_entry *ent = NULL;

        ent = &q->head[q->rear];
        if (unlikely(ent->busy)) {
                ent = NULL;
                goto no_free_entry;
        }

        q->rear++;
        if (unlikely(q->rear == qlen))
                q->rear = 0;

no_free_entry:
        return ent;
}
static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo,
                                           int qno)
{
        struct pending_queue *queue = &pqinfo->queue[qno];

        queue->front++;
        if (unlikely(queue->front == pqinfo->qlen))
                queue->front = 0;
}
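
/*
 * Each struct sglist_component packs up to four (length, pointer) pairs,
 * stored big-endian for the hardware (hence the cpu_to_be16()/cpu_to_be64()
 * conversions below). setup_sgio_components() DMA-maps every buffer in
 * @list, fills whole components four buffers at a time, then handles the
 * remaining one to three buffers separately.
 */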
static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list,
                                 int buf_count, u8 *buffer)
{
        int ret = 0, i, j;
        int components;
        struct sglist_component *sg_ptr = NULL;
        struct pci_dev *pdev = cptvf->pdev;

        if (unlikely(!list)) {
                dev_err(&pdev->dev, "Input List pointer is NULL\n");
                return -EFAULT;
        }

        for (i = 0; i < buf_count; i++) {
                if (likely(list[i].vptr)) {
                        list[i].dma_addr = dma_map_single(&pdev->dev,
                                                          list[i].vptr,
                                                          list[i].size,
                                                          DMA_BIDIRECTIONAL);
                        if (unlikely(dma_mapping_error(&pdev->dev,
                                                       list[i].dma_addr))) {
                                dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n",
                                        i);
                                ret = -EIO;
                                goto sg_cleanup;
                        }
                }
        }
        components = buf_count / 4;
        sg_ptr = (struct sglist_component *)buffer;
        for (i = 0; i < components; i++) {
                sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
                sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
                sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
                sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size);
                sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
                sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
                sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
                sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
                sg_ptr++;
        }

        components = buf_count % 4;

        switch (components) {
        case 3:
                sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size);
                sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
                fallthrough;
        case 2:
                sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size);
                sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
                fallthrough;
        case 1:
                sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size);
                sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
                break;
        default:
                break;
        }

        return ret;
sg_cleanup:
        for (j = 0; j < i; j++) {
                if (list[j].dma_addr) {
                        dma_unmap_single(&pdev->dev, list[j].dma_addr,
                                         list[j].size, DMA_BIDIRECTIONAL);
                }

                list[j].dma_addr = 0;
        }

        return ret;
}
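
/*
 * setup_sgio_list() assembles the DPTR input block: an 8-byte header with
 * the scatter and gather counts (byte-swapped in place as one u64),
 * followed by the gather components and then the scatter components.
 * The RPTR side is a small completion-code buffer preset to the
 * complement of COMPLETION_CODE_INIT so completion can be detected.
 */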
static inline int setup_sgio_list(struct cpt_vf *cptvf,
                                  struct cpt_info_buffer *info,
                                  struct cpt_request_info *req)
{
        u16 g_sz_bytes = 0, s_sz_bytes = 0;
        int ret = 0;
        struct pci_dev *pdev = cptvf->pdev;

        if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) {
                dev_err(&pdev->dev, "Request SG components are higher than supported\n");
                ret = -EINVAL;
                goto scatter_gather_clean;
        }
        /* Setup gather (input) components */
        g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component);
        info->gather_components = kzalloc(g_sz_bytes, GFP_KERNEL);
        if (!info->gather_components) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        ret = setup_sgio_components(cptvf, req->in,
                                    req->incnt,
                                    info->gather_components);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup gather list\n");
                ret = -EFAULT;
                goto scatter_gather_clean;
        }
        /* Setup scatter (output) components */
        s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component);
        info->scatter_components = kzalloc(s_sz_bytes, GFP_KERNEL);
        if (!info->scatter_components) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        ret = setup_sgio_components(cptvf, req->out,
                                    req->outcnt,
                                    info->scatter_components);
        if (ret) {
                dev_err(&pdev->dev, "Failed to setup scatter list\n");
                ret = -EFAULT;
                goto scatter_gather_clean;
        }
        /* Create and initialize DPTR */
        info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
        info->in_buffer = kzalloc(info->dlen, GFP_KERNEL);
        if (!info->in_buffer) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        ((u16 *)info->in_buffer)[0] = req->outcnt;
        ((u16 *)info->in_buffer)[1] = req->incnt;
        ((u16 *)info->in_buffer)[2] = 0;
        ((u16 *)info->in_buffer)[3] = 0;
        *(u64 *)info->in_buffer = cpu_to_be64p((u64 *)info->in_buffer);

        memcpy(&info->in_buffer[8], info->gather_components,
               g_sz_bytes);
        memcpy(&info->in_buffer[8 + g_sz_bytes],
               info->scatter_components, s_sz_bytes);

        info->dptr_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->in_buffer,
                                          info->dlen,
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) {
                dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen);
                ret = -EIO;
                goto scatter_gather_clean;
        }
        /* Create and initialize RPTR */
        info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, GFP_KERNEL);
        if (!info->out_buffer) {
                ret = -ENOMEM;
                goto scatter_gather_clean;
        }

        *((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT);
        info->alternate_caddr = (u64 *)info->out_buffer;
        info->rptr_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->out_buffer,
                                          COMPLETION_CODE_SIZE,
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) {
                dev_err(&pdev->dev, "Mapping RPTR Failed %d\n",
                        COMPLETION_CODE_SIZE);
                ret = -EIO;
                goto scatter_gather_clean;
        }

        return 0;

scatter_gather_clean:
        return ret;
}
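
/*
 * The command queue is a chain of chunks; send_cpt_command() copies the
 * instruction into the current chunk, moves qhead to the next chunk once
 * the current one fills up (one 64-byte slot per instruction, going by
 * queue->qhead->size / 64), and rings the doorbell only after a write
 * barrier so the device observes a fully written command.
 */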
static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd,
                            u32 qno)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct command_qinfo *qinfo = NULL;
        struct command_queue *queue;
        struct command_chunk *chunk;
        u8 *ent;
        int ret = 0;

        if (unlikely(qno >= cptvf->nr_queues)) {
                dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n",
                        qno, cptvf->nr_queues);
                return -EINVAL;
        }

        qinfo = &cptvf->cqinfo;
        queue = &qinfo->queue[qno];
        /* lock command queue */
        spin_lock(&queue->lock);
        ent = &queue->qhead->head[queue->idx * qinfo->cmd_size];
        memcpy(ent, (void *)cmd, qinfo->cmd_size);

        if (++queue->idx >= queue->qhead->size / 64) {
                struct hlist_node *node;

                hlist_for_each(node, &queue->chead) {
                        chunk = hlist_entry(node, struct command_chunk,
                                            nextchunk);
                        if (chunk == queue->qhead) {
                                continue;
                        } else {
                                queue->qhead = chunk;
                                break;
                        }
                }
                queue->idx = 0;
        }
        /* make sure all memory stores are done before ringing doorbell */
        smp_wmb();
        cptvf_write_vq_doorbell(cptvf, 1);
        /* unlock command queue */
        spin_unlock(&queue->lock);

        return ret;
}
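
/*
 * do_request_cleanup() undoes everything the submission path set up:
 * the DPTR/RPTR/completion DMA mappings, the caller's input and output
 * buffer mappings, and the allocated metadata. kzfree() is used since
 * the buffers may carry sensitive request data.
 */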
static void do_request_cleanup(struct cpt_vf *cptvf,
                               struct cpt_info_buffer *info)
{
        int i;
        struct pci_dev *pdev = cptvf->pdev;
        struct cpt_request_info *req;

        if (info->dptr_baddr)
                dma_unmap_single(&pdev->dev, info->dptr_baddr,
                                 info->dlen, DMA_BIDIRECTIONAL);

        if (info->rptr_baddr)
                dma_unmap_single(&pdev->dev, info->rptr_baddr,
                                 COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL);

        if (info->comp_baddr)
                dma_unmap_single(&pdev->dev, info->comp_baddr,
                                 sizeof(union cpt_res_s), DMA_BIDIRECTIONAL);

        if (info->req) {
                req = info->req;
                for (i = 0; i < req->outcnt; i++) {
                        if (req->out[i].dma_addr)
                                dma_unmap_single(&pdev->dev,
                                                 req->out[i].dma_addr,
                                                 req->out[i].size,
                                                 DMA_BIDIRECTIONAL);
                }

                for (i = 0; i < req->incnt; i++) {
                        if (req->in[i].dma_addr)
                                dma_unmap_single(&pdev->dev,
                                                 req->in[i].dma_addr,
                                                 req->in[i].size,
                                                 DMA_BIDIRECTIONAL);
                }
        }

        kzfree(info->scatter_components);
        kzfree(info->gather_components);
        kzfree(info->out_buffer);
        kzfree(info->in_buffer);
        kzfree((void *)info->completion_addr);
        kzfree(info);
}
static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (!info) {
                dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n");
                return;
        }

        do_request_cleanup(cptvf, info);
}
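
/*
 * process_pending_queue() drains entries from the front of a pending
 * queue: faulted or software-errored requests are reported and cleaned
 * up, requests still marked COMPLETION_CODE_INIT get a bounded timeout
 * extension (at most TIME_IN_RESET_COUNT times) before being timed out,
 * and completed requests are cleaned up before their callback runs.
 */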
static inline void process_pending_queue(struct cpt_vf *cptvf,
                                         struct pending_qinfo *pqinfo,
                                         int qno)
{
        struct pci_dev *pdev = cptvf->pdev;
        struct pending_queue *pqueue = &pqinfo->queue[qno];
        struct pending_entry *pentry = NULL;
        struct cpt_info_buffer *info = NULL;
        union cpt_res_s *status = NULL;
        unsigned char ccode;

        while (1) {
                spin_lock_bh(&pqueue->lock);
                pentry = &pqueue->head[pqueue->front];
                if (unlikely(!pentry->busy)) {
                        spin_unlock_bh(&pqueue->lock);
                        break;
                }

                info = (struct cpt_info_buffer *)pentry->post_arg;
                if (unlikely(!info)) {
                        dev_err(&pdev->dev, "Pending Entry post arg NULL\n");
                        pending_queue_inc_front(pqinfo, qno);
                        spin_unlock_bh(&pqueue->lock);
                        continue;
                }

                status = (union cpt_res_s *)pentry->completion_addr;
                ccode = status->s.compcode;
                if ((status->s.compcode == CPT_COMP_E_FAULT) ||
                    (status->s.compcode == CPT_COMP_E_SWERR)) {
                        dev_err(&pdev->dev, "Request failed with %s\n",
                                (status->s.compcode == CPT_COMP_E_FAULT) ?
                                "DMA Fault" : "Software error");
                        pentry->completion_addr = NULL;
                        pentry->busy = false;
                        atomic64_dec((&pqueue->pending_count));
                        pentry->post_arg = NULL;
                        pending_queue_inc_front(pqinfo, qno);
                        do_request_cleanup(cptvf, info);
                        spin_unlock_bh(&pqueue->lock);
                        break;
                } else if (status->s.compcode == COMPLETION_CODE_INIT) {
                        /* check for timeout */
                        if (time_after_eq(jiffies,
                                          (info->time_in +
                                          (CPT_COMMAND_TIMEOUT * HZ)))) {
                                dev_err(&pdev->dev, "Request timed out");
                                pentry->completion_addr = NULL;
                                pentry->busy = false;
                                atomic64_dec((&pqueue->pending_count));
                                pentry->post_arg = NULL;
                                pending_queue_inc_front(pqinfo, qno);
                                do_request_cleanup(cptvf, info);
                                spin_unlock_bh(&pqueue->lock);
                                break;
                        } else if ((*info->alternate_caddr ==
                                   (~COMPLETION_CODE_INIT)) &&
                                   (info->extra_time < TIME_IN_RESET_COUNT)) {
                                info->time_in = jiffies;
                                info->extra_time++;
                                spin_unlock_bh(&pqueue->lock);
                                break;
                        }
                }

                pentry->completion_addr = NULL;
                pentry->busy = false;
                pentry->post_arg = NULL;
                atomic64_dec((&pqueue->pending_count));
                pending_queue_inc_front(pqinfo, qno);
                spin_unlock_bh(&pqueue->lock);

                do_post_process(info->cptvf, info);
                /*
                 * Calling callback after we find
                 * that the request has been serviced
                 */
                pentry->callback(ccode, pentry->callback_arg);
        }
}
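
/*
 * process_request() is the submission path: allocate the per-request
 * info buffer, build the scatter/gather lists and completion buffer,
 * fill the VQ command and the CPT_INST_S instruction, reserve a pending
 * queue entry (draining the queue once if none is free), then push the
 * instruction to the hardware command queue.
 */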
int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req)
{
        int ret = 0, clear = 0, queue = 0;
        struct cpt_info_buffer *info = NULL;
        struct cptvf_request *cpt_req = NULL;
        union ctrl_info *ctrl = NULL;
        union cpt_res_s *result = NULL;
        struct pending_entry *pentry = NULL;
        struct pending_queue *pqueue = NULL;
        struct pci_dev *pdev = cptvf->pdev;
        u8 group = 0;
        struct cpt_vq_command vq_cmd;
        union cpt_inst_s cptinst;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (unlikely(!info)) {
                dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n");
                return -ENOMEM;
        }

        cpt_req = (struct cptvf_request *)&req->req;
        ctrl = (union ctrl_info *)&req->ctrl;

        info->cptvf = cptvf;
        group = ctrl->s.grp;
        ret = setup_sgio_list(cptvf, info, req);
        if (ret) {
                dev_err(&pdev->dev, "Setting up SG list failed");
                goto request_cleanup;
        }
        cpt_req->dlen = info->dlen;
        /*
         * Get buffer for union cpt_res_s response
         * structure and its physical address
         */
        info->completion_addr = kzalloc(sizeof(union cpt_res_s), GFP_KERNEL);
        if (unlikely(!info->completion_addr)) {
                dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n");
                ret = -ENOMEM;
                goto request_cleanup;
        }

        result = (union cpt_res_s *)info->completion_addr;
        result->s.compcode = COMPLETION_CODE_INIT;
        info->comp_baddr = dma_map_single(&pdev->dev,
                                          (void *)info->completion_addr,
                                          sizeof(union cpt_res_s),
                                          DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&pdev->dev, info->comp_baddr)) {
                dev_err(&pdev->dev, "mapping compptr Failed %lu\n",
                        sizeof(union cpt_res_s));
                ret = -EFAULT;
                goto request_cleanup;
        }
        /* Fill the VQ command */
        vq_cmd.cmd.u64 = 0;
        vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags);
        vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1);
        vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2);
        vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen);

        /* 64-bit swap for microcode data reads, not needed for addresses */
        vq_cmd.cmd.u64 = cpu_to_be64(vq_cmd.cmd.u64);
        vq_cmd.dptr = info->dptr_baddr;
        vq_cmd.rptr = info->rptr_baddr;
        vq_cmd.cptr.u64 = 0;
        vq_cmd.cptr.s.grp = group;
        /* Get Pending Entry to submit command */
        /* Always queue 0, because 1 queue per VF */
        queue = 0;
        pqueue = &cptvf->pqinfo.queue[queue];

        if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) {
                dev_err(&pdev->dev, "pending threshold reached\n");
                process_pending_queue(cptvf, &cptvf->pqinfo, queue);
        }
get_pending_entry:
        spin_lock_bh(&pqueue->lock);
        pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen);
        if (unlikely(!pentry)) {
                spin_unlock_bh(&pqueue->lock);

                if (clear == 0) {
                        process_pending_queue(cptvf, &cptvf->pqinfo, queue);
                        clear = 1;
                        goto get_pending_entry;
                }
                dev_err(&pdev->dev, "Get free entry failed\n");
                dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n",
                        queue, pqueue->rear, pqueue->front);
                ret = -EFAULT;
                goto request_cleanup;
        }
        pentry->completion_addr = info->completion_addr;
        pentry->post_arg = (void *)info;
        pentry->callback = req->callback;
        pentry->callback_arg = req->callback_arg;
        info->pentry = pentry;
        pentry->busy = true;
        atomic64_inc(&pqueue->pending_count);

        /* Send CPT command */
        info->pentry = pentry;
        info->time_in = jiffies;
        info->req = req;

        /* Create the CPT_INST_S type command for HW interpretation */
        cptinst.s.doneint = true;
        cptinst.s.res_addr = (u64)info->comp_baddr;
        cptinst.s.grp = 0;
        cptinst.s.wq_ptr = 0;
        cptinst.s.ei0 = vq_cmd.cmd.u64;
        cptinst.s.ei1 = vq_cmd.dptr;
        cptinst.s.ei2 = vq_cmd.rptr;
        cptinst.s.ei3 = vq_cmd.cptr.u64;

        ret = send_cpt_command(cptvf, &cptinst, queue);
        spin_unlock_bh(&pqueue->lock);
        if (unlikely(ret)) {
                dev_err(&pdev->dev, "Send command failed for AE\n");
                ret = -EFAULT;
                goto request_cleanup;
        }

        return 0;

request_cleanup:
        dev_dbg(&pdev->dev, "Failed to submit CPT command\n");
        do_request_cleanup(cptvf, info);

        return ret;
}
void vq_post_process(struct cpt_vf *cptvf, u32 qno)
{
        struct pci_dev *pdev = cptvf->pdev;

        if (unlikely(qno > cptvf->nr_queues)) {
                dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n",
                        qno);
                return;
        }

        process_pending_queue(cptvf, &cptvf->pqinfo, qno);
}
int cptvf_do_request(void *vfdev, struct cpt_request_info *req)
{
        struct cpt_vf *cptvf = (struct cpt_vf *)vfdev;
        struct pci_dev *pdev = cptvf->pdev;

        if (!cpt_device_ready(cptvf)) {
                dev_err(&pdev->dev, "CPT Device is not ready");
                return -ENODEV;
        }

        if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) {
                dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request",
                        cptvf->vfid);
                return -EINVAL;
        } else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) {
                dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request",
                        cptvf->vfid);
                return -EINVAL;
        }

        return process_request(cptvf, req);
}