// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"

#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}
static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive((queue->head));
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}
static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}

	return 0;
}
static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}
static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
				"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}
static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}
static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}
static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;

	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}
static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}

void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}
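/*
 * Illustrative example (not part of the original source): the doorbell is
 * written in instructions while the hardware counts 64-bit words, hence
 * the "* 8" above. After copying two CPT instructions into the command
 * queue, a caller would ring:
 *
 *	otx_cptvf_write_vq_doorbell(cptvf, 2);
 *
 * which advertises 2 * 8 = 16 new words to the engine.
 */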
static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}

static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
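/*
 * Note (an assumption from the register naming, not stated in this file):
 * the *_ENA_W1S registers are write-1-to-set and OTX_CPT_VQX_MISC_INT is
 * write-1-to-clear, so the helpers above set a single cause bit and write
 * the value back to enable or acknowledge just that interrupt.
 */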
static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
		"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
		"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
			"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}
static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}
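/*
 * The hard IRQ above only acknowledges the completion count and schedules
 * the per-VQ tasklet; the responses themselves are processed by
 * vq_work_handler() -> otx_cpt_post_process() in softirq context, which
 * keeps the interrupt handler short.
 */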
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}
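/*
 * OTX_CPT_TIMER_HOLD and OTX_CPT_COUNT_HOLD seed the DONE_WAIT coalescing
 * fields programmed above; a DONE interrupt fires once either threshold is
 * crossed (an assumption consistent with cptvf_done_intr_handler()). Both
 * values can be retuned at runtime via the vf_coalesc_* sysfs attributes
 * defined below.
 */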
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return sysfs_emit(buf, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group >= than max available groups %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}
static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			  cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n",
			  cptvf_read_vq_done_numwait(cptvf));
}

static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);

	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);

	return count;
}
static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};
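/*
 * Usage sketch (with a hypothetical <BDF> placeholder): once probe
 * registers this group, the attributes appear under the VF's PCI device,
 * e.g.:
 *
 *	cat /sys/bus/pci/devices/<BDF>/vf_type
 *	echo 2  > /sys/bus/pci/devices/<BDF>/vf_engine_group
 *	echo 32 > /sys/bus/pci/devices/<BDF>/vf_coalesc_num_wait
 */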
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check cpt pf status, gets chip ID / device Id from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		  cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		  cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);