// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include "otx_cptvf.h"
#include "otx_cptvf_algs.h"
#include "otx_cptvf_reqmgr.h"
#define DRV_NAME	"octeontx-cptvf"
#define DRV_VERSION	"1.0"
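/*
 * Tasklet bottom half: runs after the DONE interrupt has acknowledged
 * completed instructions and hands queue 0 to the request manager for
 * post-processing.
 */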
static void vq_work_handler(unsigned long data)
{
	struct otx_cptvf_wqe_info *cwqe_info =
			(struct otx_cptvf_wqe_info *) data;

	otx_cpt_post_process(&cwqe_info->vq_wqe[0]);
}
static int init_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL);
	if (!cwqe_info)
		return -ENOMEM;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Creating VQ worker threads (%d)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++) {
		tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler,
			     (u64)cwqe_info);
		cwqe_info->vq_wqe[i].cptvf = cptvf;
	}
	cptvf->wqe_info = cwqe_info;

	return 0;
}
static void cleanup_worker_threads(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	struct otx_cptvf_wqe_info *cwqe_info;
	int i;

	cwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;
	if (!cwqe_info)
		return;

	if (cptvf->num_queues) {
		dev_dbg(&pdev->dev, "Cleaning VQ worker threads (%u)\n",
			cptvf->num_queues);
	}

	for (i = 0; i < cptvf->num_queues; i++)
		tasklet_kill(&cwqe_info->vq_wqe[i].twork);

	kfree_sensitive(cwqe_info);
	cptvf->wqe_info = NULL;
}
static void free_pending_queues(struct otx_cpt_pending_qinfo *pqinfo)
{
	struct otx_cpt_pending_queue *queue;
	int i;

	for_each_pending_queue(pqinfo, queue, i) {
		if (!queue->head)
			continue;

		/* free single queue */
		kfree_sensitive((queue->head));
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = 0;
	}
	pqinfo->num_queues = 0;
}
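/*
 * Pending queues: one array of otx_cpt_pending_entry per VF queue, protected
 * by a per-queue spinlock, holding entries for commands that have been issued
 * to hardware but not yet completed.
 */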
static int alloc_pending_queues(struct otx_cpt_pending_qinfo *pqinfo, u32 qlen,
				u32 num_queues)
{
	struct otx_cpt_pending_queue *queue = NULL;
	size_t size;
	int ret;
	u32 i;

	pqinfo->num_queues = num_queues;
	size = (qlen * sizeof(struct otx_cpt_pending_entry));

	for_each_pending_queue(pqinfo, queue, i) {
		queue->head = kzalloc((size), GFP_KERNEL);
		if (!queue->head) {
			ret = -ENOMEM;
			goto pending_qfail;
		}

		queue->pending_count = 0;
		queue->front = 0;
		queue->rear = 0;
		queue->qlen = qlen;

		/* init queue spin lock */
		spin_lock_init(&queue->lock);
	}
	return 0;

pending_qfail:
	free_pending_queues(pqinfo);

	return ret;
}
static int init_pending_queues(struct otx_cptvf *cptvf, u32 qlen,
			       u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	if (!num_queues)
		return 0;

	ret = alloc_pending_queues(&cptvf->pqinfo, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		return ret;
	}
	return 0;
}
static void cleanup_pending_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ pending queue (%u)\n",
		cptvf->num_queues);
	free_pending_queues(&cptvf->pqinfo);
}
static void free_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo)
{
	struct otx_cpt_cmd_queue *queue = NULL;
	struct otx_cpt_cmd_chunk *chunk = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	int i;

	/* clean up for each queue */
	for (i = 0; i < cptvf->num_queues; i++) {
		queue = &cqinfo->queue[i];

		while (!list_empty(&cqinfo->queue[i].chead)) {
			chunk = list_first_entry(&cqinfo->queue[i].chead,
					struct otx_cpt_cmd_chunk, nextchunk);

			dma_free_coherent(&pdev->dev, chunk->size,
					  chunk->head,
					  chunk->dma_addr);
			chunk->head = NULL;
			chunk->dma_addr = 0;
			list_del(&chunk->nextchunk);
			kfree_sensitive(chunk);
		}
		queue->num_chunks = 0;
		queue->idx = 0;
	}
}
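/*
 * Command queues: each VF queue is built from a list of DMA-coherent chunks.
 * Every chunk reserves OTX_CPT_NEXT_CHUNK_PTR_SIZE bytes past its data area
 * for the bus address of the next chunk, and the last chunk points back to
 * the first, so the hardware sees one circular instruction queue.
 */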
static int alloc_command_queues(struct otx_cptvf *cptvf,
				struct otx_cpt_cmd_qinfo *cqinfo,
				u32 qlen)
{
	struct otx_cpt_cmd_chunk *curr, *first, *last;
	struct otx_cpt_cmd_queue *queue = NULL;
	struct pci_dev *pdev = cptvf->pdev;
	size_t q_size, c_size, rem_q_size;
	u32 qcsize_bytes;
	int i;

	/* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */
	cptvf->qsize = min(qlen, cqinfo->qchunksize) *
		       OTX_CPT_NEXT_CHUNK_PTR_SIZE + 1;
	/* Qsize in bytes to create space for alignment */
	q_size = qlen * OTX_CPT_INST_SIZE;

	qcsize_bytes = cqinfo->qchunksize * OTX_CPT_INST_SIZE;

	/* per queue initialization */
	for (i = 0; i < cptvf->num_queues; i++) {
		c_size = 0;
		rem_q_size = q_size;
		first = NULL;
		last = NULL;

		queue = &cqinfo->queue[i];
		INIT_LIST_HEAD(&queue->chead);
		do {
			curr = kzalloc(sizeof(*curr), GFP_KERNEL);
			if (!curr)
				goto cmd_qfail;

			c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes :
					rem_q_size;
			curr->head = dma_alloc_coherent(&pdev->dev,
					c_size + OTX_CPT_NEXT_CHUNK_PTR_SIZE,
					&curr->dma_addr, GFP_KERNEL);
			if (!curr->head) {
				dev_err(&pdev->dev,
				"Command Q (%d) chunk (%d) allocation failed\n",
					i, queue->num_chunks);
				goto free_curr;
			}
			curr->size = c_size;

			if (queue->num_chunks == 0) {
				first = curr;
				queue->base = first;
			}
			list_add_tail(&curr->nextchunk,
				      &cqinfo->queue[i].chead);

			queue->num_chunks++;
			rem_q_size -= c_size;
			if (last)
				*((u64 *)(&last->head[last->size])) =
					(u64)curr->dma_addr;

			last = curr;
		} while (rem_q_size);

		/*
		 * Make the queue circular, tie back last chunk entry to head
		 */
		curr = first;
		*((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr;
		queue->qhead = curr;
	}
	return 0;
free_curr:
	kfree(curr);
cmd_qfail:
	free_command_queues(cptvf, cqinfo);
	return -ENOMEM;
}
static int init_command_queues(struct otx_cptvf *cptvf, u32 qlen)
{
	struct pci_dev *pdev = cptvf->pdev;
	int ret;

	/* setup command queues */
	ret = alloc_command_queues(cptvf, &cptvf->cqinfo, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to allocate command queues (%u)\n",
			cptvf->num_queues);
		return ret;
	}
	return ret;
}
static void cleanup_command_queues(struct otx_cptvf *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;

	if (!cptvf->num_queues)
		return;

	dev_dbg(&pdev->dev, "Cleaning VQ command queue (%u)\n",
		cptvf->num_queues);
	free_command_queues(cptvf, &cptvf->cqinfo);
}
static void cptvf_sw_cleanup(struct otx_cptvf *cptvf)
{
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);
	cleanup_command_queues(cptvf);
}
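/*
 * Software state setup: num_queues is clamped to OTX_CPT_NUM_QS_PER_VF, then
 * the command queues, the pending queues and the per-queue tasklet workers
 * are allocated in that order; any failure unwinds what was already set up.
 */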
static int cptvf_sw_init(struct otx_cptvf *cptvf, u32 qlen, u32 num_queues)
{
	struct pci_dev *pdev = cptvf->pdev;
	u32 max_dev_queues = 0;
	int ret;

	max_dev_queues = OTX_CPT_NUM_QS_PER_VF;
	/* possible cpus */
	num_queues = min_t(u32, num_queues, max_dev_queues);
	cptvf->num_queues = num_queues;

	ret = init_command_queues(cptvf, qlen);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup command queues (%u)\n",
			num_queues);
		return ret;
	}

	ret = init_pending_queues(cptvf, qlen, num_queues);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n",
			num_queues);
		goto setup_pqfail;
	}

	/* Create worker threads for BH processing */
	ret = init_worker_threads(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Failed to setup worker threads\n");
		goto init_work_fail;
	}
	return 0;

init_work_fail:
	cleanup_worker_threads(cptvf);
	cleanup_pending_queues(cptvf);

setup_pqfail:
	cleanup_command_queues(cptvf);

	return ret;
}
static void cptvf_free_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL);
	free_cpumask_var(cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_ctl(struct otx_cptvf *cptvf, bool val)
{
	union otx_cptx_vqx_ctl vqx_ctl;

	vqx_ctl.u = readq(cptvf->reg_base + OTX_CPT_VQX_CTL(0));
	vqx_ctl.s.ena = val;
	writeq(vqx_ctl.u, cptvf->reg_base + OTX_CPT_VQX_CTL(0));
}
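/*
 * The doorbell register counts 64-bit words and each CPT instruction is
 * eight words long, so callers pass a number of instructions and the value
 * written to hardware is scaled by 8.
 */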
void otx_cptvf_write_vq_doorbell(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_doorbell vqx_dbell;

	vqx_dbell.u = readq(cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
	vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
	writeq(vqx_dbell.u, cptvf->reg_base + OTX_CPT_VQX_DOORBELL(0));
}
static void cptvf_write_vq_inprog(struct otx_cptvf *cptvf, u8 val)
{
	union otx_cptx_vqx_inprog vqx_inprg;

	vqx_inprg.u = readq(cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
	vqx_inprg.s.inflight = val;
	writeq(vqx_inprg.u, cptvf->reg_base + OTX_CPT_VQX_INPROG(0));
}

static void cptvf_write_vq_done_numwait(struct otx_cptvf *cptvf, u32 val)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.num_wait = val;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u32 cptvf_read_vq_done_numwait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.num_wait;
}

static void cptvf_write_vq_done_timewait(struct otx_cptvf *cptvf, u16 time)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	vqx_dwait.s.time_wait = time;
	writeq(vqx_dwait.u, cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
}

static u16 cptvf_read_vq_done_timewait(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_wait vqx_dwait;

	vqx_dwait.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_WAIT(0));
	return vqx_dwait.s.time_wait;
}
static void cptvf_enable_swerr_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable SWERR interrupts for the requested VF */
	vqx_misc_ena.s.swerr = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_mbox_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_ena_w1s vqx_misc_ena;

	vqx_misc_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
	/* Enable MBOX interrupt for the requested VF */
	vqx_misc_ena.s.mbox = 1;
	writeq(vqx_misc_ena.u, cptvf->reg_base + OTX_CPT_VQX_MISC_ENA_W1S(0));
}

static void cptvf_enable_done_interrupts(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done_ena_w1s vqx_done_ena;

	vqx_done_ena.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
	/* Enable DONE interrupt for the requested VF */
	vqx_done_ena.s.done = 1;
	writeq(vqx_done_ena.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ENA_W1S(0));
}
static void cptvf_clear_dovf_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.dovf = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_irde_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.irde = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_nwrp_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.nwrp = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_mbox_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.mbox = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}

static void cptvf_clear_swerr_intr(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_misc_int vqx_misc_int;

	vqx_misc_int.u = readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
	/* W1C for the VF */
	vqx_misc_int.s.swerr = 1;
	writeq(vqx_misc_int.u, cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
static u64 cptvf_read_vf_misc_intr_status(struct otx_cptvf *cptvf)
{
	return readq(cptvf->reg_base + OTX_CPT_VQX_MISC_INT(0));
}
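/*
 * MISC interrupt handler: demultiplexes mailbox traffic from the PF and the
 * error sources (doorbell overflow, instruction NCB read error, NCB response
 * write error, software error), clearing each cause with a write-1-to-clear.
 */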
static irqreturn_t cptvf_misc_intr_handler(int __always_unused irq,
					   void *arg)
{
	struct otx_cptvf *cptvf = arg;
	struct pci_dev *pdev = cptvf->pdev;
	u64 intr;

	intr = cptvf_read_vf_misc_intr_status(cptvf);
	/* Check for MISC interrupt types */
	if (likely(intr & OTX_CPT_VF_INTR_MBOX_MASK)) {
		dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
		otx_cptvf_handle_mbox_intr(cptvf);
		cptvf_clear_mbox_intr(cptvf);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_DOVF_MASK)) {
		cptvf_clear_dovf_intr(cptvf);
		/* Clear doorbell count */
		otx_cptvf_write_vq_doorbell(cptvf, 0);
		dev_err(&pdev->dev,
		"Doorbell overflow error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_IRDE_MASK)) {
		cptvf_clear_irde_intr(cptvf);
		dev_err(&pdev->dev,
		"Instruction NCB read error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_NWRP_MASK)) {
		cptvf_clear_nwrp_intr(cptvf);
		dev_err(&pdev->dev,
		"NCB response write error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else if (unlikely(intr & OTX_CPT_VF_INTR_SERR_MASK)) {
		cptvf_clear_swerr_intr(cptvf);
		dev_err(&pdev->dev,
			"Software error interrupt 0x%llx on CPT VF %d\n",
			intr, cptvf->vfid);
	} else {
		dev_err(&pdev->dev, "Unhandled interrupt in OTX_CPT VF %d\n",
			cptvf->vfid);
	}

	return IRQ_HANDLED;
}
static inline struct otx_cptvf_wqe *get_cptvf_vq_wqe(struct otx_cptvf *cptvf,
						     int qno)
{
	struct otx_cptvf_wqe_info *nwqe_info;

	if (unlikely(qno >= cptvf->num_queues))
		return NULL;
	nwqe_info = (struct otx_cptvf_wqe_info *)cptvf->wqe_info;

	return &nwqe_info->vq_wqe[qno];
}
static inline u32 cptvf_read_vq_done_count(struct otx_cptvf *cptvf)
{
	union otx_cptx_vqx_done vqx_done;

	vqx_done.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE(0));
	return vqx_done.s.done;
}

static inline void cptvf_write_vq_done_ack(struct otx_cptvf *cptvf,
					   u32 ackcnt)
{
	union otx_cptx_vqx_done_ack vqx_dack_cnt;

	vqx_dack_cnt.u = readq(cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
	vqx_dack_cnt.s.done_ack = ackcnt;
	writeq(vqx_dack_cnt.u, cptvf->reg_base + OTX_CPT_VQX_DONE_ACK(0));
}
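/*
 * DONE interrupt handler: reads the number of completed instructions,
 * acknowledges them so the hardware completion counter can be reused, and
 * defers the actual response processing to the queue 0 tasklet.
 */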
static irqreturn_t cptvf_done_intr_handler(int __always_unused irq,
					   void *cptvf_dev)
{
	struct otx_cptvf *cptvf = (struct otx_cptvf *)cptvf_dev;
	struct pci_dev *pdev = cptvf->pdev;
	/* Read the number of completions */
	u32 intr = cptvf_read_vq_done_count(cptvf);

	if (intr) {
		struct otx_cptvf_wqe *wqe;

		/*
		 * Acknowledge the number of scheduled completions for
		 * processing
		 */
		cptvf_write_vq_done_ack(cptvf, intr);
		wqe = get_cptvf_vq_wqe(cptvf, 0);
		if (unlikely(!wqe)) {
			dev_err(&pdev->dev, "No work to schedule for VF (%d)\n",
				cptvf->vfid);
			return IRQ_NONE;
		}
		tasklet_hi_schedule(&wqe->twork);
	}

	return IRQ_HANDLED;
}
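/*
 * Spread the VF's interrupt vectors across the online CPUs of the device's
 * NUMA node; the affinity is only a hint, so allocation failure is not fatal.
 */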
static void cptvf_set_irq_affinity(struct otx_cptvf *cptvf, int vec)
{
	struct pci_dev *pdev = cptvf->pdev;
	int cpu;

	if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec],
				GFP_KERNEL)) {
		dev_err(&pdev->dev,
			"Allocation failed for affinity_mask for VF %d\n",
			cptvf->vfid);
		return;
	}

	cpu = cptvf->vfid % num_online_cpus();
	cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node),
			cptvf->affinity_mask[vec]);
	irq_set_affinity_hint(pci_irq_vector(pdev, vec),
			      cptvf->affinity_mask[vec]);
}
static void cptvf_write_vq_saddr(struct otx_cptvf *cptvf, u64 val)
{
	union otx_cptx_vqx_saddr vqx_saddr;

	vqx_saddr.u = val;
	writeq(vqx_saddr.u, cptvf->reg_base + OTX_CPT_VQX_SADDR(0));
}
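/*
 * Hardware bring-up for the virtual queue: disable it, clear the doorbell
 * and in-flight counters, point SADDR at the first command chunk, program
 * the completion coalescing thresholds and finally re-enable the queue.
 */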
static void cptvf_device_init(struct otx_cptvf *cptvf)
{
	u64 base_addr = 0;

	/* Disable the VQ */
	cptvf_write_vq_ctl(cptvf, 0);
	/* Reset the doorbell */
	otx_cptvf_write_vq_doorbell(cptvf, 0);
	/* Clear inflight */
	cptvf_write_vq_inprog(cptvf, 0);
	/* Write VQ SADDR */
	base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr);
	cptvf_write_vq_saddr(cptvf, base_addr);
	/* Configure timerhold / coalescence */
	cptvf_write_vq_done_timewait(cptvf, OTX_CPT_TIMER_HOLD);
	cptvf_write_vq_done_numwait(cptvf, OTX_CPT_COUNT_HOLD);
	/* Enable the VQ */
	cptvf_write_vq_ctl(cptvf, 1);
	/* Flag the VF ready */
	cptvf->flags |= OTX_CPT_FLAG_DEVICE_READY;
}
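/*
 * Per-VF sysfs attributes: the engine type (SE/AE), the engine group
 * assignment and the two completion coalescing knobs (time and count),
 * all collected in otx_cptvf_sysfs_group below.
 */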
static ssize_t vf_type_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	char *msg;

	switch (cptvf->vftype) {
	case OTX_CPT_AE_TYPES:
		msg = "AE";
		break;

	case OTX_CPT_SE_TYPES:
		msg = "SE";
		break;

	default:
		msg = "Invalid";
	}

	return scnprintf(buf, PAGE_SIZE, "%s\n", msg);
}
static ssize_t vf_engine_group_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", cptvf->vfgrp);
}
static ssize_t vf_engine_group_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	int val, ret;

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		return ret;

	if (val < 0)
		return -EINVAL;

	if (val >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Engine group >= than max available groups %d\n",
			OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	ret = otx_cptvf_send_vf_to_grp_msg(cptvf, val);
	if (ret)
		return ret;

	return count;
}
static ssize_t vf_coalesc_time_wait_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_timewait(cptvf));
}

static ssize_t vf_coalesc_num_wait_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n",
			 cptvf_read_vq_done_numwait(cptvf));
}
static ssize_t vf_coalesc_time_wait_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_TIME_WAIT ||
	    val > OTX_CPT_COALESC_MAX_TIME_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_timewait(cptvf, val);
	return count;
}

static ssize_t vf_coalesc_num_wait_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct otx_cptvf *cptvf = dev_get_drvdata(dev);
	long val;
	int ret;

	ret = kstrtol(buf, 10, &val);
	if (ret != 0)
		return ret;

	if (val < OTX_CPT_COALESC_MIN_NUM_WAIT ||
	    val > OTX_CPT_COALESC_MAX_NUM_WAIT)
		return -EINVAL;

	cptvf_write_vq_done_numwait(cptvf, val);
	return count;
}
static DEVICE_ATTR_RO(vf_type);
static DEVICE_ATTR_RW(vf_engine_group);
static DEVICE_ATTR_RW(vf_coalesc_time_wait);
static DEVICE_ATTR_RW(vf_coalesc_num_wait);

static struct attribute *otx_cptvf_attrs[] = {
	&dev_attr_vf_type.attr,
	&dev_attr_vf_engine_group.attr,
	&dev_attr_vf_coalesc_time_wait.attr,
	&dev_attr_vf_coalesc_num_wait.attr,
	NULL
};

static const struct attribute_group otx_cptvf_sysfs_group = {
	.attrs = otx_cptvf_attrs,
};
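/*
 * Probe: map the VF's BAR, allocate the MSI-X vectors and hook up the MISC
 * interrupt, then handshake with the PF over the mailbox (ready check, VQ
 * size, engine group, priority, UP), initialize the software queues and the
 * hardware VQ, hook up the DONE interrupt, register the crypto algorithms
 * and create the sysfs group.
 */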
static int otx_cptvf_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cptvf *cptvf;
	int err;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto disable_device;
	}
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable 48-bit DMA configuration\n");
		goto release_regions;
	}

	/* MAP PF's configuration registers */
	cptvf->reg_base = pci_iomap(pdev, OTX_CPT_VF_PCI_CFG_BAR, 0);
	if (!cptvf->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto release_regions;
	}

	cptvf->node = dev_to_node(&pdev->dev);
	err = pci_alloc_irq_vectors(pdev, OTX_CPT_VF_MSIX_VECTORS,
				    OTX_CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for #%d msix vectors failed\n",
			OTX_CPT_VF_MSIX_VECTORS);
		goto unmap_region;
	}

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC),
			  cptvf_misc_intr_handler, 0, "CPT VF misc intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request misc irq\n");
		goto free_vectors;
	}

	/* Enable mailbox interrupt */
	cptvf_enable_mbox_interrupts(cptvf);
	cptvf_enable_swerr_interrupts(cptvf);

	/* Check cpt pf status, gets chip ID / device Id from PF if ready */
	err = otx_cptvf_check_pf_ready(cptvf);
	if (err)
		goto free_misc_irq;

	/* CPT VF software resources initialization */
	cptvf->cqinfo.qchunksize = OTX_CPT_CMD_QCHUNK_SIZE;
	err = cptvf_sw_init(cptvf, OTX_CPT_CMD_QLEN, OTX_CPT_NUM_QS_PER_VF);
	if (err) {
		dev_err(dev, "cptvf_sw_init() failed\n");
		goto free_misc_irq;
	}
	/* Convey VQ LEN to PF */
	err = otx_cptvf_send_vq_size_msg(cptvf);
	if (err)
		goto sw_cleanup;

	/* CPT VF device initialization */
	cptvf_device_init(cptvf);
	/* Send msg to PF to assign current Q to required group */
	err = otx_cptvf_send_vf_to_grp_msg(cptvf, cptvf->vfgrp);
	if (err)
		goto sw_cleanup;

	cptvf->priority = 1;
	err = otx_cptvf_send_vf_priority_msg(cptvf);
	if (err)
		goto sw_cleanup;

	err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE),
			  cptvf_done_intr_handler, 0, "CPT VF done intr",
			  cptvf);
	if (err) {
		dev_err(dev, "Failed to request done irq\n");
		goto sw_cleanup;
	}
	/* Enable done interrupt */
	cptvf_enable_done_interrupts(cptvf);

	/* Set irq affinity masks */
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);

	err = otx_cptvf_send_vf_up(cptvf);
	if (err)
		goto free_irq_affinity;

	/* Initialize algorithms and set ops */
	err = otx_cpt_crypto_init(pdev, THIS_MODULE,
		    cptvf->vftype == OTX_CPT_SE_TYPES ? OTX_CPT_SE : OTX_CPT_AE,
		    cptvf->vftype, 1, cptvf->num_vfs);
	if (err) {
		dev_err(dev, "Failed to register crypto algs\n");
		goto free_irq_affinity;
	}

	err = sysfs_create_group(&dev->kobj, &otx_cptvf_sysfs_group);
	if (err) {
		dev_err(dev, "Creating sysfs entries failed\n");
		goto crypto_exit;
	}

	return 0;

crypto_exit:
	otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
free_irq_affinity:
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
	cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
sw_cleanup:
	cptvf_sw_cleanup(cptvf);
free_misc_irq:
	free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
free_vectors:
	pci_free_irq_vectors(cptvf->pdev);
unmap_region:
	pci_iounmap(pdev, cptvf->reg_base);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}
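/*
 * Teardown mirrors probe in reverse, but only after the PF has acknowledged
 * the DOWN message; if the PF does not respond, only an error is logged and
 * no teardown is performed.
 */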
static void otx_cptvf_remove(struct pci_dev *pdev)
{
	struct otx_cptvf *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT-VF device\n");
		return;
	}

	/* Convey DOWN to PF */
	if (otx_cptvf_send_vf_down(cptvf)) {
		dev_err(&pdev->dev, "PF not responding to DOWN msg\n");
	} else {
		sysfs_remove_group(&pdev->dev.kobj, &otx_cptvf_sysfs_group);
		otx_cpt_crypto_exit(pdev, THIS_MODULE, cptvf->vftype);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE);
		cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf);
		free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf);
		cptvf_sw_cleanup(cptvf);
		pci_free_irq_vectors(cptvf->pdev);
		pci_iounmap(pdev, cptvf->reg_base);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Supported devices */
static const struct pci_device_id otx_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }  /* end of table */
};

static struct pci_driver otx_cptvf_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cptvf_id_table,
	.probe = otx_cptvf_probe,
	.remove = otx_cptvf_remove,
};

module_pci_driver(otx_cptvf_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cptvf_id_table);