// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/firmware.h>
#include "otx2_cpt_hw_types.h"
#include "otx2_cpt_common.h"
#include "otx2_cpt_devlink.h"
#include "otx2_cptpf_ucode.h"
#include "otx2_cptpf.h"
#include "cn10k_cpt.h"
#include "rvu_reg.h"

#define OTX2_CPT_DRV_NAME    "rvu_cptpf"
#define OTX2_CPT_DRV_STRING  "Marvell RVU CPT Physical Function Driver"

#define CPT_UC_RID_CN9K_B0   1
#define CPT_UC_RID_CN10K_A   4
#define CPT_UC_RID_CN10K_B   5

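/*
 * Enable the VF-PF mailbox interrupts for the first num_vfs VFs. The
 * second interrupt-enable register covers VFs 64-127 and is only
 * programmed when more than 64 VFs are requested.
 */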
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					int num_vfs)
{
	int ena_bits;

	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0x0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(1), ~0x0ULL);

	/* Enable VF interrupts for VFs from 0 to 63 */
	ena_bits = ((num_vfs - 1) % 64);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0),
			 GENMASK_ULL(ena_bits, 0));

	if (num_vfs > 64) {
		/* Enable VF interrupts for VFs from 64 to 127 */
		ena_bits = num_vfs - 64 - 1;
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
				 GENMASK_ULL(ena_bits, 0));
	}
}

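/*
 * Counterpart of cptpf_enable_vfpf_mbox_intr(): mask both VF-PF mailbox
 * interrupt-enable registers, clear anything pending and free the
 * mailbox IRQ vector(s).
 */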
static void cptpf_disable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	int vector;

	/* Disable VF-PF interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ULL);
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ULL);
	/* Clear any pending interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFPF_MBOX_INTX(0), ~0ULL);

	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);

	if (num_vfs > 64) {
		otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFPF_MBOX_INTX(1), ~0ULL);
		vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		free_irq(vector, cptpf);
	}
}

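/*
 * Clear and enable the per-VF FLR and ME (master enable) interrupts.
 * VFs 64-127 are handled through the second register pair, which is
 * only touched when more than 64 VFs are configured.
 */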
static void cptpf_enable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					 int num_vfs)
{
	/* Clear FLR interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(0),
			 INTR_MASK(num_vfs));

	/* Enable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(num_vfs));
	/* Clear ME interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(0),
			 INTR_MASK(num_vfs));
	/* Enable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(num_vfs));

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFFLR_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_VFME_INTX(1),
			 INTR_MASK(num_vfs - 64));
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(num_vfs - 64));
}

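/* Mask the per-VF FLR and ME interrupts and free their IRQ vectors. */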
static void cptpf_disable_vf_flr_me_intrs(struct otx2_cptpf_dev *cptpf,
					  int num_vfs)
{
	int vector;

	/* Disable VF FLR interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);

	/* Disable VF ME interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(num_vfs));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);

	if (num_vfs <= 64)
		return;

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);

	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
			 RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(num_vfs - 64));
	vector = pci_irq_vector(cptpf->pdev, RVU_PF_INT_VEC_VFME1);
	free_irq(vector, cptpf);
}

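/*
 * Deferred FLR handling: notify the AF about the reset VF with a
 * MBOX_MSG_VF_FLR message, then clear the VF's transaction-pending bit
 * and re-enable its FLR interrupt.
 */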
static void cptpf_flr_wq_handler(struct work_struct *work)
{
	struct cptpf_flr_work *flr_work;
	struct otx2_cptpf_dev *pf;
	struct mbox_msghdr *req;
	struct otx2_mbox *mbox;
	int vf, reg = 0;

	flr_work = container_of(work, struct cptpf_flr_work, work);
	pf = flr_work->pf;
	mbox = &pf->afpf_mbox;

	vf = flr_work - pf->flr_work;
	mutex_lock(&pf->lock);
	req = otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req) {
		mutex_unlock(&pf->lock);
		return;
	}

	req->sig = OTX2_MBOX_REQ_SIG;
	req->id = MBOX_MSG_VF_FLR;
	req->pcifunc &= RVU_PFVF_FUNC_MASK;
	req->pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;

	otx2_cpt_send_mbox_msg(mbox, pf->pdev);
	if (!otx2_cpt_sync_mbox_msg(&pf->afpf_mbox)) {
		if (vf >= 64) {
			reg = 1;
			vf = vf - 64;
		}
		/* Clear transaction pending register */
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
		otx2_cpt_write64(pf->reg_base, BLKADDR_RVUM, 0,
				 RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
	}
	mutex_unlock(&pf->lock);
}

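/*
 * VF FLR interrupt handler: for every VF bit set in the FLR interrupt
 * register(s), queue the corresponding FLR work item, ack the interrupt
 * and mask it until the work handler has informed the AF.
 */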
static irqreturn_t cptpf_vf_flr_intr(int __always_unused irq, void *arg)
{
	int reg, dev, vf, start_vf, num_reg = 1;
	struct otx2_cptpf_dev *cptpf = arg;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFFLR_INTX(reg));
		if (!intr)
			continue;
		start_vf = 64 * reg;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			dev = vf + start_vf;
			queue_work(cptpf->flr_wq, &cptpf->flr_work[dev].work);
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFFLR_INT_ENA_W1CX(reg),
					 BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

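/*
 * VF ME (master enable) interrupt handler: clear the transaction
 * pending bit for each signalling VF and ack the interrupt.
 */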
static irqreturn_t cptpf_vf_me_intr(int __always_unused irq, void *arg)
{
	struct otx2_cptpf_dev *cptpf = arg;
	int reg, vf, num_reg = 1;
	u64 intr;

	if (cptpf->max_vfs > 64)
		num_reg = 2;

	for (reg = 0; reg < num_reg; reg++) {
		intr = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
				       RVU_PF_VFME_INTX(reg));
		if (!intr)
			continue;
		for (vf = 0; vf < 64; vf++) {
			if (!(intr & BIT_ULL(vf)))
				continue;
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear interrupt */
			otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0,
					 RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
		}
	}
	return IRQ_HANDLED;
}

static void cptpf_unregister_vfpf_intr(struct otx2_cptpf_dev *cptpf,
				       int num_vfs)
{
	cptpf_disable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_disable_vf_flr_me_intrs(cptpf, num_vfs);
}

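/*
 * Request the VF-PF mailbox, FLR and ME IRQs (the Mbox1/FLR1/ME1
 * vectors only when num_vfs > 64) and enable them; on failure, the
 * already requested vectors are freed in reverse order.
 */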
static int cptpf_register_vfpf_intr(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, vector;

	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	/* Register VF-PF mailbox interrupt handler */
	ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0, "CPTVFPF Mbox0",
			  cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFVF mbox0 irq\n");
		return ret;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	/* Register VF FLR interrupt handler */
	ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFFLR0 irq\n");
		goto free_mbox0_irq;
	}
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	/* Register VF ME interrupt handler */
	ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME0", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for VFME0 irq\n");
		goto free_flr0_irq;
	}

	if (num_vfs > 64) {
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
		ret = request_irq(vector, otx2_cptpf_vfpf_mbox_intr, 0,
				  "CPTVFPF Mbox1", cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for PFVF mbox1 irq\n");
			goto free_me0_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
		/* Register VF FLR interrupt handler */
		ret = request_irq(vector, cptpf_vf_flr_intr, 0, "CPTPF FLR1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFFLR1 irq\n");
			goto free_mbox1_irq;
		}
		vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME1);
		/* Register VF ME interrupt handler */
		ret = request_irq(vector, cptpf_vf_me_intr, 0, "CPTPF ME1",
				  cptpf);
		if (ret) {
			dev_err(dev,
				"IRQ registration failed for VFME1 irq\n");
			goto free_flr1_irq;
		}
	}
	cptpf_enable_vfpf_mbox_intr(cptpf, num_vfs);
	cptpf_enable_vf_flr_me_intrs(cptpf, num_vfs);

	return 0;

free_flr1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR1);
	free_irq(vector, cptpf);
free_mbox1_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
	free_irq(vector, cptpf);
free_me0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFME0);
	free_irq(vector, cptpf);
free_flr0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFFLR0);
	free_irq(vector, cptpf);
free_mbox0_irq:
	vector = pci_irq_vector(pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
	free_irq(vector, cptpf);
	return ret;
}

static void cptpf_flr_wq_destroy(struct otx2_cptpf_dev *pf)
{
	if (!pf->flr_wq)
		return;
	destroy_workqueue(pf->flr_wq);
	pf->flr_wq = NULL;
	kfree(pf->flr_work);
}

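/*
 * Allocate an ordered workqueue and one work item per VF for deferred
 * FLR handling.
 */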
static int cptpf_flr_wq_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	int vf;

	cptpf->flr_wq = alloc_ordered_workqueue("cptpf_flr_wq", 0);
	if (!cptpf->flr_wq)
		return -ENOMEM;

	cptpf->flr_work = kcalloc(num_vfs, sizeof(struct cptpf_flr_work),
				  GFP_KERNEL);
	if (!cptpf->flr_work)
		goto destroy_wq;

	for (vf = 0; vf < num_vfs; vf++) {
		cptpf->flr_work[vf].pf = cptpf;
		INIT_WORK(&cptpf->flr_work[vf].work, cptpf_flr_wq_handler);
	}
	return 0;

destroy_wq:
	destroy_workqueue(cptpf->flr_wq);
	return -ENOMEM;
}

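/*
 * Set up the PF side of the VF-PF mailbox: the mailbox base address is
 * advertised through RVU_PF_VF_MBOX_ADDR on CN10K and through
 * RVU_PF_VF_BAR4_ADDR otherwise, and one mailbox region plus work item
 * is initialized per VF.
 */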
static int cptpf_vfpf_mbox_init(struct otx2_cptpf_dev *cptpf, int num_vfs)
{
	struct device *dev = &cptpf->pdev->dev;
	u64 vfpf_mbox_base;
	int err, i;

	cptpf->vfpf_mbox_wq =
		alloc_ordered_workqueue("cpt_vfpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->vfpf_mbox_wq)
		return -ENOMEM;

	/* Map VF-PF mailbox memory */
	if (test_bit(CN10K_MBOX, &cptpf->cap_flag))
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_MBOX_ADDR);
	else
		vfpf_mbox_base = readq(cptpf->reg_base + RVU_PF_VF_BAR4_ADDR);

	if (!vfpf_mbox_base) {
		dev_err(dev, "VF-PF mailbox address not configured\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	cptpf->vfpf_mbox_base = devm_ioremap_wc(dev, vfpf_mbox_base,
						MBOX_SIZE * cptpf->max_vfs);
	if (!cptpf->vfpf_mbox_base) {
		dev_err(dev, "Mapping of VF-PF mailbox address failed\n");
		err = -ENOMEM;
		goto free_wqe;
	}
	err = otx2_mbox_init(&cptpf->vfpf_mbox, cptpf->vfpf_mbox_base,
			     cptpf->pdev, cptpf->reg_base, MBOX_DIR_PFVF,
			     num_vfs);
	if (err)
		goto free_wqe;

	for (i = 0; i < num_vfs; i++) {
		cptpf->vf[i].vf_id = i;
		cptpf->vf[i].cptpf = cptpf;
		cptpf->vf[i].intr_idx = i % 64;
		INIT_WORK(&cptpf->vf[i].vfpf_mbox_work,
			  otx2_cptpf_vfpf_mbox_handler);
	}
	return 0;

free_wqe:
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	return err;
}

static void cptpf_vfpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->vfpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->vfpf_mbox);
}

static void cptpf_disable_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	/* Disable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1C,
			 0x1ULL);
	/* Clear interrupt if any */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
}

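/*
 * Hook up the AF-PF mailbox interrupt and send a READY message to the
 * AF; if the AF does not respond, the probe is deferred.
 */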
static int cptpf_register_afpf_mbox_intr(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	struct device *dev = &pdev->dev;
	int ret, irq;

	irq = pci_irq_vector(pdev, RVU_PF_INT_VEC_AFPF_MBOX);
	/* Register AF-PF mailbox interrupt handler */
	ret = devm_request_irq(dev, irq, otx2_cptpf_afpf_mbox_intr, 0,
			       "CPTAFPF Mbox", cptpf);
	if (ret) {
		dev_err(dev,
			"IRQ registration failed for PFAF mbox irq\n");
		return ret;
	}
	/* Clear interrupt if any, to avoid spurious interrupts */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT, 0x1ULL);
	/* Enable AF-PF interrupt */
	otx2_cpt_write64(cptpf->reg_base, BLKADDR_RVUM, 0, RVU_PF_INT_ENA_W1S,
			 0x1ULL);

	ret = otx2_cpt_send_ready_msg(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret) {
		dev_warn(dev,
			 "AF not responding to mailbox, deferring probe\n");
		cptpf_disable_afpf_mbox_intr(cptpf);
		return -EPROBE_DEFER;
	}
	return 0;
}

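/*
 * Set up the PF side of the AF-PF mailbox: map the mailbox BAR and
 * initialize both the request (PFAF) and notification (PFAF_UP)
 * directions.
 */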
static int cptpf_afpf_mbox_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t offset;
	int err;

	cptpf->afpf_mbox_wq =
		alloc_ordered_workqueue("cpt_afpf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptpf->afpf_mbox_wq)
		return -ENOMEM;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	/* Map AF-PF mailbox memory */
	cptpf->afpf_mbox_base = devm_ioremap_wc(&pdev->dev, offset, MBOX_SIZE);
	if (!cptpf->afpf_mbox_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		err = -ENOMEM;
		goto error;
	}

	err = otx2_mbox_init(&cptpf->afpf_mbox, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF, 1);
	if (err)
		goto error;

	err = otx2_mbox_init(&cptpf->afpf_mbox_up, cptpf->afpf_mbox_base,
			     pdev, cptpf->reg_base, MBOX_DIR_PFAF_UP, 1);
	if (err)
		goto mbox_cleanup;

	INIT_WORK(&cptpf->afpf_mbox_work, otx2_cptpf_afpf_mbox_handler);
	INIT_WORK(&cptpf->afpf_mbox_up_work, otx2_cptpf_afpf_mbox_up_handler);
	mutex_init(&cptpf->lock);

	return 0;

mbox_cleanup:
	otx2_mbox_destroy(&cptpf->afpf_mbox);
error:
	destroy_workqueue(cptpf->afpf_mbox_wq);
	return err;
}

static void cptpf_afpf_mbox_destroy(struct otx2_cptpf_dev *cptpf)
{
	destroy_workqueue(cptpf->afpf_mbox_wq);
	otx2_mbox_destroy(&cptpf->afpf_mbox);
	otx2_mbox_destroy(&cptpf->afpf_mbox_up);
}

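/*
 * sysfs handlers for the sso_pf_func_ovrd attribute, which is only
 * writable on CN9K B0 parts.
 */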
static ssize_t sso_pf_func_ovrd_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->sso_pf_func_ovrd);
}

static ssize_t sso_pf_func_ovrd_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	u8 sso_pf_func_ovrd;

	if (!(cptpf->pdev->revision == CPT_UC_RID_CN9K_B0))
		return count;

	if (kstrtou8(buf, 0, &sso_pf_func_ovrd))
		return -EINVAL;

	cptpf->sso_pf_func_ovrd = sso_pf_func_ovrd;

	return count;
}

static ssize_t kvf_limits_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", cptpf->kvf_limits);
}

static ssize_t kvf_limits_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
	int lfs_num;
	int ret;

	ret = kstrtoint(buf, 0, &lfs_num);
	if (ret)
		return ret;
	if (lfs_num < 1 || lfs_num > num_online_cpus()) {
		dev_err(dev, "lfs count %d must be in range [1 - %d]\n",
			lfs_num, num_online_cpus());
		return -EINVAL;
	}
	cptpf->kvf_limits = lfs_num;

	return count;
}

static DEVICE_ATTR_RW(kvf_limits);
static DEVICE_ATTR_RW(sso_pf_func_ovrd);

static struct attribute *cptpf_attrs[] = {
	&dev_attr_kvf_limits.attr,
	&dev_attr_sso_pf_func_ovrd.attr,
	NULL
};

static const struct attribute_group cptpf_sysfs_group = {
	.attrs = cptpf_attrs,
};

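/* Probe-time check that the AF driver has initialized the RVUM block. */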
static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
{
	u64 rev;

	rev = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
	rev = (rev >> 12) & 0xFF;
	/*
	 * Check if AF has setup revision for RVUM block, otherwise
	 * driver probe should be deferred until AF driver comes up
	 */
	if (!rev) {
		dev_warn(&cptpf->pdev->dev,
			 "AF is not initialized, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	u64 reg_val = 0x0;

	if (is_dev_otx2(pdev)) {
		eng_grps->rid = pdev->revision;
		return;
	}
	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);
	if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
	    is_dev_cn10ka_ax(pdev))
		eng_grps->rid = CPT_UC_RID_CN10K_A;
	else if (cpt_feature_sgv2(pdev))
		eng_grps->rid = CPT_UC_RID_CN10K_B;
}

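/* Detect whether the optional second CPT block (CPT1) is implemented. */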
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
{
	u64 cfg;

	cfg = otx2_cpt_read64(cptpf->reg_base, BLKADDR_RVUM, 0,
			      RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_CPT1));
	if (cfg & BIT_ULL(11))
		cptpf->has_cpt1 = true;
}

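/*
 * One-time device init: detect CPT1, read the SE/IE/AE engine counts
 * from CPT_AF_CONSTANTS1 and disable all engine cores.
 */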
static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
{
	union otx2_cptx_af_constants1 af_cnsts1 = {0};
	int ret = 0;

	/* check if 'implemented' bit is set for block BLKADDR_CPT1 */
	cptpf_check_block_implemented(cptpf);

	/* Get number of SE, IE and AE engines */
	ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				   CPT_AF_CONSTANTS1, &af_cnsts1.u,
				   BLKADDR_CPT0);
	if (ret)
		return ret;

	cptpf->eng_grps.avail.max_se_cnt = af_cnsts1.s.se;
	cptpf->eng_grps.avail.max_ie_cnt = af_cnsts1.s.ie;
	cptpf->eng_grps.avail.max_ae_cnt = af_cnsts1.s.ae;

	/* Disable all cores */
	ret = otx2_cpt_disable_all_cores(cptpf);

	return ret;
}

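/* SR-IOV teardown: undo cptpf_sriov_enable() in reverse order. */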
static int cptpf_sriov_disable(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int num_vfs = pci_num_vf(pdev);

	if (!num_vfs)
		return 0;

	pci_disable_sriov(pdev);
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf_flr_wq_destroy(cptpf);
	cptpf_vfpf_mbox_destroy(cptpf);
	module_put(THIS_MODULE);
	cptpf->enabled_vfs = 0;

	return 0;
}

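/*
 * SR-IOV bring-up: VF-PF mailbox, FLR workqueue and interrupts first,
 * then microcode engine groups, and finally pci_enable_sriov().
 */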
static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);
	int ret;

	/* Initialize VF<=>PF mailbox */
	ret = cptpf_vfpf_mbox_init(cptpf, num_vfs);
	if (ret)
		return ret;

	ret = cptpf_flr_wq_init(cptpf, num_vfs);
	if (ret)
		goto destroy_mbox;
	/* Register VF<=>PF mailbox interrupt */
	ret = cptpf_register_vfpf_intr(cptpf, num_vfs);
	if (ret)
		goto destroy_flr;

	cptpf_get_rid(pdev, cptpf);
	/* Get CPT HW capabilities using LOAD_FVC operation. */
	ret = otx2_cpt_discover_eng_capabilities(cptpf);
	if (ret)
		goto disable_intr;

	ret = otx2_cpt_create_eng_grps(cptpf, &cptpf->eng_grps);
	if (ret)
		goto disable_intr;

	cptpf->enabled_vfs = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		goto disable_intr;

	dev_notice(&cptpf->pdev->dev, "VFs enabled: %d\n", num_vfs);

	try_module_get(THIS_MODULE);
	return num_vfs;

disable_intr:
	cptpf_unregister_vfpf_intr(cptpf, num_vfs);
	cptpf->enabled_vfs = 0;
destroy_flr:
	cptpf_flr_wq_destroy(cptpf);
destroy_mbox:
	cptpf_vfpf_mbox_destroy(cptpf);
	return ret;
}

static int otx2_cptpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return cptpf_sriov_enable(pdev, num_vfs);
	else
		return cptpf_sriov_disable(pdev);
}

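/*
 * PF probe: enable the PCI device, map the register BAR, bring up the
 * AF-PF mailbox, initialize the device and engine groups, and expose
 * the sysfs attributes and devlink interface.
 */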
static int otx2_cptpf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptpf_dev *cptpf;
	int err, num_vec;

	cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
	if (!cptpf)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}

	err = pcim_request_all_regions(pdev, OTX2_CPT_DRV_NAME);
	if (err) {
		dev_err(dev, "Couldn't request PCI resources 0x%x\n", err);
		goto clear_drvdata;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptpf);
	cptpf->pdev = pdev;

	/* Map PF's configuration registers */
	cptpf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!cptpf->reg_base) {
		err = -ENOMEM;
		dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", err);
		goto clear_drvdata;
	}

	/* Check if AF driver is up, otherwise defer probe */
	err = cpt_is_pf_usable(cptpf);
	if (err)
		goto clear_drvdata;

	num_vec = pci_msix_vec_count(cptpf->pdev);
	if (num_vec <= 0) {
		err = -EINVAL;
		goto clear_drvdata;
	}

	err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (err < 0) {
		dev_err(dev, "Request for %d msix vectors failed\n",
			num_vec);
		goto clear_drvdata;
	}

	otx2_cpt_set_hw_caps(pdev, &cptpf->cap_flag);
	/* Initialize AF-PF mailbox */
	err = cptpf_afpf_mbox_init(cptpf);
	if (err)
		goto clear_drvdata;
	/* Register mailbox interrupt */
	err = cptpf_register_afpf_mbox_intr(cptpf);
	if (err)
		goto destroy_afpf_mbox;

	cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
	cptpf->kvf_limits = 1;

	err = cn10k_cptpf_lmtst_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize CPT PF device */
	err = cptpf_device_init(cptpf);
	if (err)
		goto unregister_intr;

	/* Initialize engine groups */
	err = otx2_cpt_init_eng_grps(pdev, &cptpf->eng_grps);
	if (err)
		goto unregister_intr;

	err = sysfs_create_group(&dev->kobj, &cptpf_sysfs_group);
	if (err)
		goto cleanup_eng_grps;

	err = otx2_cpt_register_dl(cptpf);
	if (err)
		goto sysfs_grp_del;

	return 0;

sysfs_grp_del:
	sysfs_remove_group(&dev->kobj, &cptpf_sysfs_group);
cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
unregister_intr:
	cptpf_disable_afpf_mbox_intr(cptpf);
destroy_afpf_mbox:
	cptpf_afpf_mbox_destroy(cptpf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void otx2_cptpf_remove(struct pci_dev *pdev)
{
	struct otx2_cptpf_dev *cptpf = pci_get_drvdata(pdev);

	if (!cptpf)
		return;

	cptpf_sriov_disable(pdev);
	otx2_cpt_unregister_dl(cptpf);

	/* Cleanup Inline CPT LF's if attached */
	if (cptpf->lfs.lfs_num)
		otx2_inline_cptlf_cleanup(&cptpf->lfs);

	if (cptpf->cpt1_lfs.lfs_num)
		otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);

	/* Delete sysfs entry created for kernel VF limits */
	sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
	/* Cleanup engine groups */
	otx2_cpt_cleanup_eng_grps(pdev, &cptpf->eng_grps);
	/* Disable AF-PF mailbox interrupt */
	cptpf_disable_afpf_mbox_intr(cptpf);
	/* Destroy AF-PF mbox */
	cptpf_afpf_mbox_destroy(cptpf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX2_CPT_PCI_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CN10K_CPT_PCI_PF_DEVICE_ID) },
	{ 0, } /* end of table */
};

static struct pci_driver otx2_cpt_pci_driver = {
	.name = OTX2_CPT_DRV_NAME,
	.id_table = otx2_cpt_id_table,
	.probe = otx2_cptpf_probe,
	.remove = otx2_cptpf_remove,
	.sriov_configure = otx2_cptpf_sriov_configure
};

module_pci_driver(otx2_cpt_pci_driver);

MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cpt_id_table);