// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"
#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
					(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}
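/*
 * Each virtual queue (VQ) gets a tasklet that runs completion
 * post-processing (otx_cpt_post_process()) in bottom-half context;
 * the DONE interrupt handler only acknowledges completions and
 * schedules this work.
 */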
static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	/* Destroy all worker threads */
	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kzfree(cwqe_info);
	cptvf->wqe_info = NULL;
}
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kzfree((queue->head));
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	size_t size;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;
	size = (qlen * sizeof(struct otx_cpt_pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc((size), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}
static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}
static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}
static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head, chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kzfree(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}
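/*
 * A command queue is built from one or more DMA-coherent chunks of
 * qchunksize instructions each. Every chunk is allocated with room
 * for a trailing next-chunk pointer (OTX_CPT_NEXT_CHUNK_PTR_SIZE
 * bytes) holding the DMA address of the following chunk, and the last
 * chunk is tied back to the first so the queue forms a ring starting
 * at the address programmed into VQX_SADDR.
 */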
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
				"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				kfree(curr);
				goto cmd_qfail;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;

cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}
static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}
static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}
static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
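/*
 * Software state is set up in dependency order: command queues first,
 * then the pending queues that track in-flight requests, and finally
 * the worker tasklets. The error paths below unwind in reverse order.
 */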
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* possible cpus */
	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}
static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}
static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}
static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}
static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}
static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}
static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}
static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}
static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}
static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
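/*
 * The MISC interrupt vector aggregates several causes: mailbox
 * messages from the PF plus doorbell overflow (DOVF), instruction
 * NCB read (IRDE), NCB response write (NWRP) and software (SWERR)
 * errors. Each cause is cleared by writing its bit back to
 * VQX_MISC_INT (W1C).
 */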
static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
		"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
		"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
			"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}
static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}
static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}
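/*
 * DONE interrupt handler: raised when the coalescing thresholds
 * programmed into VQX_DONE_WAIT (completion count or hold timer) are
 * met. It only acknowledges the batch of completions and defers the
 * response processing to the VQ tasklet.
 */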
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}
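/*
 * Hardware bring-up: quiesce the VQ (disable it, clear the doorbell
 * and inflight counts), point VQX_SADDR at the DMA address of the
 * first command-queue chunk, program completion coalescing, then
 * re-enable the VQ and mark the device ready.
 */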
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}
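/*
 * Per-VF sysfs attributes: vf_type reports the engine type (AE/SE),
 * vf_engine_group reads the group binding and writes it via a mailbox
 * message to the PF, and the vf_coalesc_* knobs read and write the
 * DONE interrupt coalescing registers directly.
 */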
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group >= than max available groups %d",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}
static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_timewait(cptvf));
}
static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_numwait(cptvf));
}
static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}
static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}
static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);
static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};
static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};
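/*
 * Probe brings the VF up in stages: PCI and MSI-X setup, MISC
 * interrupt registration, PF handshake over the mailbox, software
 * queue allocation, hardware VQ init, DONE interrupt registration,
 * and finally crypto algorithm and sysfs registration. Each failure
 * path unwinds only what was set up before it.
 */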
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq");
		goto free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check cpt pf status, gets chip ID / device Id from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}

	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}
static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};
static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};
module_pci_driver(otx_cptvf_pci_driver);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);