// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "cptvf.h"

#define DRV_NAME	"thunder-cptvf"
#define DRV_VERSION	"1.0"
struct cptvf_wqe {
	struct tasklet_struct twork;
	void *cptvf;
	u32 qno;
};

struct cptvf_wqe_info {
	struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF];
};
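/*
 * Bottom-half processing: each virtual queue (VQ) owns a cptvf_wqe whose
 * tasklet runs vq_post_process() after the DONE interrupt handler has
 * acknowledged the outstanding completions.
 */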
static void vq_work_handler(unsigned long data)
{
	struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data;
	struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0];

	vq_post_process(cwqe->cptvf, cwqe->qno);
}
static int init_worker_threads(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].qno = i;
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}

	cptvf->wqe_info = cwqe_info;

	return 0;
}
static void cleanup_worker_threads(struct cpt_vf *cptvf)
{
	struct cptvf_wqe_info *cwqe_info;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->nr_queues) {
		dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			 cptvf->nr_queues);
	}

	for (i = 0; i < cptvf->nr_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}
static void free_pending_queues(struct pending_qinfo *pqinfo)
{
	struct pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive(queue->head);
		queue->head = NULL;
	}

	pqinfo->nr_queues = 0;
}
static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen,
				u32 nr_queues)
{
	struct pending_queue *queue = NULL;
	u32 i;
	int ret;

	pqinfo->nr_queues = nr_queues;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		atomic64_set(&queue->pending_count, 0);

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}

	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}
static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!nr_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "failed to setup pending queues (%u)\n",
			nr_queues);
		return ret;
	}

	return 0;
}
static void cleanup_pending_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		 cptvf->nr_queues);
	free_pending_queues(&cptvf->pqinfo);
}
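/*
 * Command queue layout (as built by alloc_command_queues() below): each VQ's
 * instruction queue is a chain of DMA-coherent chunks.  The trailing
 * CPT_NEXT_CHUNK_PTR_SIZE bytes of every chunk hold the bus address of the
 * next chunk, and the last chunk is tied back to the first so the hardware
 * sees one circular queue.
 */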
static void free_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo)
{
	struct command_queue *queue = NULL;
	struct command_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	struct hlist_node *node;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->nr_queues; i++) {
		queue = &cqinfo->queue[i];
		if (hlist_empty(&cqinfo->queue[i].chead))
			continue;

		hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead,
					  nextchunk) {
			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			hlist_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}

		queue->nchunks = 0;
	}

	/* common cleanup */
	cqinfo->cmd_size = 0;
}
static int alloc_command_queues(struct cpt_vf *cptvf,
				struct command_qinfo *cqinfo, size_t cmd_size,
				u32 qlen)
{
	struct command_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size;
	int i;

	/* common init */
	cqinfo->cmd_size = cmd_size;
	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
			CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * cqinfo->cmd_size;

	/* per queue initialization */
	for (i = 0; i < cptvf->nr_queues; i++) {
		size_t c_size = 0;
		size_t rem_q_size = q_size;
		struct command_chunk *curr = NULL, *first = NULL, *last = NULL;
		u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size;

		queue = &cqinfo->queue[i];
		INIT_HLIST_HEAD(&cqinfo->queue[i].chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
							c_size + CPT_NEXT_CHUNK_PTR_SIZE,
							&curr->dma_addr,
							GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->nchunks);
				kfree(curr);
				goto cmd_qfail;
			}

			curr->size = c_size;
			if (queue->nchunks == 0) {
				hlist_add_head(&curr->nextchunk,
					       &cqinfo->queue[i].chead);
				first = curr;
			} else {
				hlist_add_behind(&curr->nextchunk,
						 &last->nextchunk);
			}

			queue->nchunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/* Make the queue circular */
		/* Tie back last chunk entry to head */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
		spin_lock_init(&queue->lock);
	}

	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);

	return -ENOMEM;
}
static int init_command_queues(struct cpt_vf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup AE command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE,
				   qlen);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n",
			cptvf->nr_queues);
		return ret;
	}

	return 0;
}
static void cleanup_command_queues(struct cpt_vf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->nr_queues)
		return;

	dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		 cptvf->nr_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}
static void cptvf_sw_cleanup(struct cpt_vf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
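/*
 * cptvf_sw_init() builds the per-VF software state in order: command queues,
 * pending queues, then the worker tasklets.  On failure the stages that were
 * already set up are torn down again before returning the error.
 */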
static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = CPT_NUM_QS_PER_VF;
	nr_queues = min_t(u32, nr_queues, max_dev_queues);
	cptvf->nr_queues = nr_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			nr_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, nr_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			nr_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}

	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}
static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}
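/*
 * The helpers below program the VF's virtual-queue CSRs through
 * cpt_read_csr64()/cpt_write_csr64().  The (0, 0) register index is used
 * throughout because each VF exposes a single VQ.
 */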
static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val)
{
	union cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0));
	vqx_ctl.s.ena = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u);
}
void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DOORBELL(0, 0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0),
			vqx_dbell.u);
}
static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val)
{
	union cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0));
	vqx_inprg.s.inflight = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
}
static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.num_wait = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}
static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time)
{
	union cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = cpt_read_csr64(cptvf->reg_base,
				     CPTX_VQX_DONE_WAIT(0, 0));
	vqx_dwait.s.time_wait = time;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0),
			vqx_dwait.u);
}
static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Set SWERR interrupts for the requested vf */
	vqx_misc_ena.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}
static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_ENA_W1S(0, 0));
	/* Set mbox(0) interrupts for the requested vf */
	vqx_misc_ena.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0),
			vqx_misc_ena.u);
}
static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf)
{
	union cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ENA_W1S(0, 0));
	/* Set DONE interrupt for the requested vf */
	vqx_done_ena.s.done = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0),
			vqx_done_ena.u);
}
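/*
 * The cptvf_clear_*_intr() helpers below acknowledge individual MISC
 * interrupt causes by writing 1 to the corresponding bit of
 * CPTX_VQX_MISC_INT, which is treated as write-one-to-clear here.
 */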
static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.dovf = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}
static void cptvf_clear_irde_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.irde = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}
static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.nwrp = 1;
	cpt_write_csr64(cptvf->reg_base,
			CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
}
static void cptvf_clear_mbox_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.mbox = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}
static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf)
{
	union cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_MISC_INT(0, 0));
	vqx_misc_int.s.swerr = 1;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0),
			vqx_misc_int.u);
}
static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf)
{
	return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0));
}
static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf,
						 int qno)
{
	struct cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->nr_queues))
		return NULL;

	nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}
static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf)
{
	union cptx_vqx_done vqx_done;

	vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0));
	return vqx_done.s.done;
}
static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf,
					   u32 ackcnt)
{
	union cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base,
					CPTX_VQX_DONE_ACK(0, 0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0),
			vqx_dack_cnt.u);
}
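/*
 * DONE interrupt path: read the completion count from CPTX_VQX_DONE, write
 * it back to CPTX_VQX_DONE_ACK, and kick the VQ tasklet so the completions
 * are post-processed outside hard-IRQ context.
 */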
static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq)
{
	struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct cptvf_wqe *wqe;

		/* Acknowledge the number of
		 * scheduled completions for processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}
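/*
 * Spread each VF's MSI-X vectors across the CPUs of its NUMA node; the CPU
 * is derived from the VF id so different VFs tend to land on different CPUs.
 */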
static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val)
{
	union cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
}
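/*
 * Hardware bring-up for the VQ: quiesce it (CTL = 0, doorbell and in-flight
 * counts cleared), point SADDR at the first command-queue chunk, set the
 * completion coalescing thresholds, then re-enable the queue.
 */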
static void cptvf_device_init(struct cpt_vf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight count */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	/* TODO: for now only one queue, so hard coded */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timer threshold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
	cptvf_write_vq_done_numwait(cptvf, 1);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= CPT_FLAG_DEVICE_READY;
}
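/*
 * Probe flow: enable the PCI function and map BAR0, allocate the VF's MSI-X
 * vectors, hook the MISC interrupt, handshake with the PF (READY, QLEN,
 * VF_GRP, VF_PRIO and UP mailbox messages), initialize the software queues
 * and the VQ hardware, hook the DONE interrupt, and finally register the
 * crypto algorithms via cvm_crypto_init().
 */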
static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct cpt_vf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto cptvf_err_disable_device;
	}

	/* Mark as VF driver */
	cptvf->flags |= CPT_FLAG_VF_DRIVER;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto cptvf_err_release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, 0, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto cptvf_err_release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS,
				    CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			CPT_VF_MSIX_VECTORS);
		goto cptvf_err_release_regions;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request misc irq failed");
		goto cptvf_free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check ready with PF */
	/* Gets chip ID / device Id from PF if ready */
	err = cptvf_check_pf_ready(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to READY msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed");
		goto cptvf_free_misc_irq;
	}

	/* Convey VQ LEN to PF */
	err = cptvf_send_vq_size_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to QLEN msg");
		goto cptvf_free_misc_irq;
	}

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);

	/* Send msg to PF to assign current Q to required group */
	err = cptvf_send_vf_to_grp_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_GRP msg");
		goto cptvf_free_misc_irq;
	}

	err = cptvf_send_vf_priority_msg(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to VF_PRIO msg");
		goto cptvf_free_misc_irq;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Request done irq failed\n");
		goto cptvf_free_misc_irq;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = cptvf_send_vf_up(cptvf);
	if (err) {
		dev_err(dev, "PF not responding to UP msg");
		goto cptvf_free_irq_affinity;
	}

	err = cvm_crypto_init(cptvf);
	if (err) {
		dev_err(dev, "Algorithm register failed\n");
		goto cptvf_free_irq_affinity;
	}

	return 0;

cptvf_free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
cptvf_free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
cptvf_free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
cptvf_err_release_regions:
	pci_release_regions(pdev);
cptvf_err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void cptvf_remove(struct pci_dev *pdev)
{
	struct cpt_vf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg");
	} else {
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		cptvf_sw_cleanup(cptvf);
		pci_set_drvdata(pdev, NULL);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		cvm_crypto_exit();
	}
}
static void cptvf_shutdown(struct pci_dev *pdev)
{
	cptvf_remove(pdev);
}
/* Supported devices */
static const struct pci_device_id cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};
static struct pci_driver cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = cptvf_id_table,
	.probe = cptvf_probe,
	.remove = cptvf_remove,
	.shutdown = cptvf_shutdown,
};

module_pci_driver(cptvf_pci_driver);
MODULE_AUTHOR("George Cherian <george.cherian@cavium.com>");
MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cptvf_id_table);