// SPDX-License-Identifier: GPL-2.0-only
/*
 * SolidRun DPU driver for control plane
 *
 * Copyright (C) 2022-2023 SolidRun
 *
 * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
 *
 */

#include <linux/iopoll.h>

#include "snet_vdpa.h"

/* SNET DPU device ID */
#define SNET_DEVICE_ID		0x1000
#define SNET_SIGNATURE		0xD0D06363
/* Max. config version that we can work with */
#define SNET_CFG_VERSION	0x2
#define SNET_QUEUE_ALIGNMENT	PAGE_SIZE
/* Kick value to notify that new data is available */
#define SNET_KICK_VAL		0x1
#define SNET_CONFIG_OFF		0x0
/* How long we are willing to wait for a SNET device */
#define SNET_DETECT_TIMEOUT	5000000
/* How long should we wait for the DPU to read our config */
#define SNET_READ_CFG_TIMEOUT	3000000
/* Size of configs written to the DPU */
#define SNET_GENERAL_CFG_LEN	36
#define SNET_GENERAL_CFG_VQ_LEN	40

static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}

static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
{
	struct snet *snet = data;
	/* Call callback if any */
	if (likely(snet->cb.callback))
		return snet->cb.callback(snet->cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
{
	struct snet_vq *vq = data;
	/* Call callback if any */
	if (likely(vq->cb.callback))
		return vq->cb.callback(vq->cb.private);

	return IRQ_HANDLED;
}

static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;

	/* Which device allocated the IRQs? */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;

	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}

	/* IRQ vectors are freed when the PCI remove callback is called */
}

static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* Save the received parameters in the vqueue struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;

	return 0;
}

static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* Save num in the vqueue */
	snet->vqs[idx]->num = num;
}

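/*
 * Kick helpers: snet_build_vqs() assigns every VQ a 4-byte doorbell in the
 * VF BAR (at kick_off + sid * 4). Writing SNET_KICK_VAL to it notifies the
 * DPU that new buffers are available on that queue.
 */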
static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32(SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

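/*
 * Same as snet_kick_vq(), but the lower 16 bits of 'data' carry the VQ index
 * and the upper 16 bits carry extra data that is written to the doorbell
 * together with the kick value.
 */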
static void snet_kick_vq_with_data(struct vdpa_device *vdev, u32 data)
{
	struct snet *snet = vdpa_to_snet(vdev);
	u16 idx = data & 0xFFFF;

	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32((data & 0xFFFF0000) | SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->cb.callback = cb->callback;
	snet->vqs[idx]->cb.private = cb->private;
}

static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->ready = ready;
}

static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->ready;
}

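/*
 * Check whether 'state' is the initial VQ state the DPU starts from:
 * for a packed ring, both wrap counters are 1 and both indexes are 0;
 * for a split ring, the available index is 0.
 */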
static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
{
	if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
		const struct vdpa_vq_state_packed *p = &state->packed;

		if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
		    p->last_avail_idx == 0 && p->last_used_idx == 0)
			return true;
	} else {
		const struct vdpa_vq_state_split *s = &state->split;

		if (s->avail_index == 0)
			return true;
	}

	return false;
}

static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* We can set any state for config version 2+ */
	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}

	/* Older config - we can't set the VQ state.
	 * Return 0 only if this is the initial state we use in the DPU.
	 */
	if (snet_vq_state_is_initial(snet, state))
		return 0;

	return -EOPNOTSUPP;
}

static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_read_vq_state(snet, idx, state);
}

static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->irq;
}

static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}

static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;

	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;

	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);

	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}

	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status */
	snet->status = 0;
	snet->dpu_ready = false;

	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);

	return 0;
}

static int snet_reset(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_reset_dev(snet);
}

static size_t snet_get_config_size(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (size_t)snet->cfg->cfg_size;
}

static u64 snet_get_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->features;
}

static int snet_set_drv_features(struct vdpa_device *vdev, u64 features)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->negotiated_features = snet->cfg->features & features;

	return 0;
}

static u64 snet_get_drv_features(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->negotiated_features;
}

static u16 snet_get_vq_num_max(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (u16)snet->cfg->vq_size;
}

static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->cb.callback = cb->callback;
	snet->cb.private = cb->private;
}

static u32 snet_get_device_id(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->cfg->virtio_id;
}

static u32 snet_get_vendor_id(struct vdpa_device *vdev)
{
	return (u32)PCI_VENDOR_ID_SOLIDRUN;
}

static u8 snet_get_status(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->status;
}

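/*
 * Write the host configuration into the DPU's host config area in the VF BAR
 * (starting at host_cfg_off). psnet_read_cfg() already verified that the
 * general data plus the per-VQ records fit in max_size_host_cfg.
 */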
static int snet_write_conf(struct snet *snet)
{
	u32 off, i, tmp;
	int ret;

	/* No need to write the config twice */
	if (snet->dpu_ready)
		return true;

	/* Snet data:
	 *
	 * General data: SNET_GENERAL_CFG_LEN bytes long
	 *  0             0x4       0x8        0xC       0x10      0x14        0x1C     0x24
	 *  | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES |   RSVD   |
	 *
	 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long
	 * 0                          0x4        0x8
	 * |  VQ SID  AND  QUEUE SIZE | IRQ Index |
	 * |            DESC AREA                 |
	 * |            DEVICE AREA               |
	 * |            DRIVER AREA               |
	 * | VQ STATE (CFG 2+) |      RSVD        |
	 *
	 * Magic number should be written last, this is the DPU indication that the data is ready
	 */

	/* Init offset */
	off = snet->psnet->cfg.host_cfg_off;

	/* Ignore magic number for now */
	off += 4;
	snet_write32(snet, off, snet->psnet->negotiated_cfg_ver);
	off += 4;
	snet_write32(snet, off, snet->sid);
	off += 4;
	snet_write32(snet, off, snet->cfg->vq_num);
	off += 4;
	snet_write32(snet, off, snet->cfg_irq_idx);
	off += 4;
	snet_write64(snet, off, snet->negotiated_features);
	off += 8;
	/* Ignore reserved */
	off += 8;

	/* Write VQs data */
	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF);
		snet_write32(snet, off, tmp);
		off += 4;
		snet_write32(snet, off, snet->vqs[i]->irq_idx);
		off += 4;
		snet_write64(snet, off, snet->vqs[i]->desc_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->device_area);
		off += 8;
		snet_write64(snet, off, snet->vqs[i]->driver_area);
		off += 8;
		/* Write VQ state if config version is 2+ */
		if (SNET_CFG_VER(snet, 2))
			snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state);
		off += 4;

		/* Ignore reserved */
		off += 4;
	}

	/* Write magic number - data is ready */
	snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE);

	/* The DPU will ACK the config by clearing the signature */
	ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off,
				 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT);
	if (ret) {
		SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n");
		return false;
	}

	/* Set DPU flag */
	snet->dpu_ready = true;

	return true;
}

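/*
 * Request the config-change IRQ and one IRQ per VQ. 'pdev' is either the VF
 * or its parent PF, depending on which function owns the MSI-X vectors
 * (SNET_CFG_FLAG_IRQ_PF).
 */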
static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet)
{
	int ret, i, irq;

	/* Request config IRQ */
	irq = pci_irq_vector(pdev, snet->cfg_irq_idx);
	ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0,
			       snet->cfg_irq_name, snet);
	if (ret) {
		SNET_ERR(pdev, "Failed to request IRQ\n");
		return ret;
	}
	snet->cfg_irq = irq;

	/* Request IRQ for every VQ */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx);
		ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0,
				       snet->vqs[i]->irq_name, snet->vqs[i]);
		if (ret) {
			SNET_ERR(pdev, "Failed to request IRQ\n");
			return ret;
		}
		snet->vqs[i]->irq = irq;
	}

	return 0;
}

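/*
 * A 0 -> DRIVER_OK transition is the point where the device is actually
 * created on the DPU: IRQs are requested and the config is written with
 * snet_write_conf(). Any failure marks the status as FAILED.
 */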
static void snet_set_status(struct vdpa_device *vdev, u8 status)
{
	struct snet *snet = vdpa_to_snet(vdev);
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev = snet->pdev;
	int ret;
	bool pf_irqs;

	if (status == snet->status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		/* Request IRQs */
		pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);
		ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet);
		if (ret)
			goto set_err;

		/* Write config to the DPU */
		if (snet_write_conf(snet)) {
			SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid);
		} else {
			snet_free_irqs(snet);
			goto set_err;
		}
	}

	/* Save the new status */
	snet->status = status;
	return;

set_err:
	snet->status |= VIRTIO_CONFIG_S_FAILED;
}

static void snet_get_config(struct vdpa_device *vdev, unsigned int offset,
			    void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	u8 *buf_ptr = buf;
	u32 i;

	/* Check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Write into buffer */
	for (i = 0; i < len; i++)
		*buf_ptr++ = ioread8(cfg_ptr + i);
}

static void snet_set_config(struct vdpa_device *vdev, unsigned int offset,
			    const void *buf, unsigned int len)
{
	struct snet *snet = vdpa_to_snet(vdev);
	void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset;
	const u8 *buf_ptr = buf;
	u32 i;

	/* Check for offset error */
	if (offset + len > snet->cfg->cfg_size)
		return;

	/* Write into PCI BAR */
	for (i = 0; i < len; i++)
		iowrite8(*buf_ptr++, cfg_ptr + i);
}

static int snet_suspend(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int ret;

	ret = snet_suspend_dev(snet);
	if (ret)
		SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid);

	return ret;
}

static int snet_resume(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);
	int ret;

	ret = snet_resume_dev(snet);
	if (ret)
		SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid);

	return ret;
}

static const struct vdpa_config_ops snet_config_ops = {
	.set_vq_address		= snet_set_vq_address,
	.set_vq_num		= snet_set_vq_num,
	.kick_vq		= snet_kick_vq,
	.kick_vq_with_data	= snet_kick_vq_with_data,
	.set_vq_cb		= snet_set_vq_cb,
	.set_vq_ready		= snet_set_vq_ready,
	.get_vq_ready		= snet_get_vq_ready,
	.set_vq_state		= snet_set_vq_state,
	.get_vq_state		= snet_get_vq_state,
	.get_vq_irq		= snet_get_vq_irq,
	.get_vq_align		= snet_get_vq_align,
	.get_config_size	= snet_get_config_size,
	.get_device_features	= snet_get_features,
	.set_driver_features	= snet_set_drv_features,
	.get_driver_features	= snet_get_drv_features,
	.get_vq_num_min		= snet_get_vq_num_max,
	.get_vq_num_max		= snet_get_vq_num_max,
	.set_config_cb		= snet_set_config_cb,
	.get_device_id		= snet_get_device_id,
	.get_vendor_id		= snet_get_vendor_id,
	.get_status		= snet_get_status,
	.set_status		= snet_set_status,
	.get_config		= snet_get_config,
	.set_config		= snet_set_config,
	.suspend		= snet_suspend,
	.resume			= snet_resume,
};

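/*
 * PF BAR handling: the BAR used for communication is not known in advance,
 * so every BAR with a non-zero length is mapped first; psnet_detect_bar()
 * later finds the one holding SNET_SIGNATURE and the rest are unmapped.
 */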
static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet)
{
	char *name;
	int ret, i, mask = 0;
	/* We don't know which BAR will be used to communicate..
	 * We will map every bar with len > 0.
	 *
	 * Later, we will discover the BAR and unmap all other BARs.
	 */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (pci_resource_len(pdev, i))
			mask |= (1 << i);
	}

	/* No BAR can be used.. */
	if (!mask) {
		SNET_ERR(pdev, "Failed to find a PCI BAR\n");
		return -ENODEV;
	}

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	ret = pcim_iomap_regions(pdev, mask, name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BARs\n");
		return ret;
	}

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (mask & (1 << i))
			psnet->bars[i] = pcim_iomap_table(pdev)[i];
	}

	return 0;
}

static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet)
{
	char *name;
	int ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev));
	if (!name)
		return -ENOMEM;

	/* Request and map BAR */
	ret = pcim_iomap_regions(pdev, BIT(snet->psnet->cfg.vf_bar), name);
	if (ret) {
		SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n");
		return ret;
	}

	snet->bar = pcim_iomap_table(pdev)[snet->psnet->cfg.vf_bar];

	return 0;
}

static void snet_free_cfg(struct snet_cfg *cfg)
{
	u32 i;

	if (!cfg->devs)
		return;

	/* Free devices */
	for (i = 0; i < cfg->devices_num; i++) {
		if (!cfg->devs[i])
			break;

		kfree(cfg->devs[i]);
	}
	/* Free pointers to devices */
	kfree(cfg->devs);
}

/* Detect which BAR is used for communication with the device. */
static int psnet_detect_bar(struct psnet *psnet, u32 off)
{
	unsigned long exit_time;
	int i;

	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);

	/* SNET DPU will write SNET's signature when the config is ready. */
	while (time_before(jiffies, exit_time)) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			/* Is this BAR mapped? */
			if (!psnet->bars[i])
				continue;

			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
				return i;
		}
		usleep_range(1000, 10000);
	}

	return -ENODEV;
}

static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
	int i, mask = 0;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (psnet->bars[i] && i != psnet->barno)
			mask |= (1 << i);
	}

	if (mask)
		pcim_iounmap_regions(pdev, mask);
}

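/*
 * The DPU publishes its configuration at SNET_CONFIG_OFF in the detected BAR:
 * general fields (key, config size/version, VF count, VF BAR, offsets, flags)
 * followed by one record per device (virtio id, VQ number/size, vfid,
 * features, config size).
 */
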
/* Read SNET config from PCI BAR */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* Load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * version and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointers to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big..\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}

	return 0;
}

static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
{
	int ret;
	u32 i, irq_num = 0;

	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
	for (i = 0; i < psnet->cfg.devices_num; i++)
		irq_num += psnet->cfg.devs[i]->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret != irq_num) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);

	return 0;
}

static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
{
	int ret;
	u32 irq_num;

	/* We want 1 IRQ for every VQ + 1 for config change events */
	irq_num = snet_cfg->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret <= 0) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}

	return 0;
}

static void snet_free_vqs(struct snet *snet)
{
	u32 i;

	if (!snet->vqs)
		return;

	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		if (!snet->vqs[i])
			break;

		kfree(snet->vqs[i]);
	}
	kfree(snet->vqs);
}

static int snet_build_vqs(struct snet *snet)
{
	u32 i;
	/* Allocate the VQ pointers array */
	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
	if (!snet->vqs)
		return -ENOMEM;

	/* Allocate the VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
		if (!snet->vqs[i]) {
			snet_free_vqs(snet);
			return -ENOMEM;
		}
		/* Reset IRQ num */
		snet->vqs[i]->irq = -1;
		/* VQ serial ID */
		snet->vqs[i]->sid = i;
		/* Kick address - every VQ gets 4B */
		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
					 snet->vqs[i]->sid * 4;
		/* Clear kick address for this VQ */
		iowrite32(0, snet->vqs[i]->kick_ptr);
	}

	return 0;
}

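/*
 * IRQ vector indexes are handed out from psnet->next_irq under psnet->lock,
 * so each config IRQ and VQ IRQ gets its own index that stays stable even if
 * the IRQs themselves are requested and freed multiple times.
 */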
static int psnet_get_next_irq_num(struct psnet *psnet)
{
	int irq;

	spin_lock(&psnet->lock);
	irq = psnet->next_irq++;
	spin_unlock(&psnet->lock);

	return irq;
}

static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int i;

	/* One IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);

	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}

/* Find a device config based on virtual function id */
static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
{
	u32 i;

	for (i = 0; i < cfg->devices_num; i++) {
		if (cfg->devs[i]->vfid == vfid)
			return cfg->devs[i];
	}
	/* Oops.. no config found.. */
	return NULL;
}

/* Probe function for a physical PCI function */
static int snet_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct psnet *psnet;
	int ret = 0;
	bool pf_irqs = false;

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI device\n");
		return ret;
	}

	/* Allocate a PCI physical function device */
	psnet = kzalloc(sizeof(*psnet), GFP_KERNEL);
	if (!psnet)
		return -ENOMEM;

	/* Init PSNET spinlock */
	spin_lock_init(&psnet->lock);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, psnet);

	/* Open SNET MAIN BAR */
	ret = psnet_open_pf_bar(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* Try to read SNET's config from PCI BAR */
	ret = psnet_read_cfg(pdev, psnet);
	if (ret)
		goto free_psnet;

	/* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use
	 * PF MSI-X vectors
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	if (pf_irqs) {
		ret = psnet_alloc_irq_vector(pdev, psnet);
		if (ret)
			goto free_cfg;
	}

	SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num);
	ret = pci_enable_sriov(pdev, psnet->cfg.vf_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable SR-IOV\n");
		goto free_irq;
	}

	/* Create HW monitor device */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) {
#if IS_ENABLED(CONFIG_HWMON)
		psnet_create_hwmon(pdev);
#else
		SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n");
#endif
	}

	return 0;

free_irq:
	if (pf_irqs)
		pci_free_irq_vectors(pdev);
free_cfg:
	snet_free_cfg(&psnet->cfg);
free_psnet:
	kfree(psnet);
	return ret;
}

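/*
 * VF probe flow: map the VF id to its snet_dev_cfg (the DPU counts VFs
 * from 1), allocate the vdpa device, map the VF BAR, build the VQs and
 * reserve IRQ indexes, then register the device with the vdpa core.
 */
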
/* Probe function for a virtual PCI function */
static int snet_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct pci_dev *pdev_pf = pdev->physfn;
	struct psnet *psnet = pci_get_drvdata(pdev_pf);
	struct snet_dev_cfg *dev_cfg;
	struct snet *snet;
	u32 vfid;
	int ret;
	bool pf_irqs = false;

	/* Get virtual function id.
	 * (the DPU counts the VFs from 1)
	 */
	ret = pci_iov_vf_id(pdev);
	if (ret < 0) {
		SNET_ERR(pdev, "Failed to find a VF id\n");
		return ret;
	}
	vfid = ret + 1;

	/* Find the snet_dev_cfg based on vfid */
	dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid);
	if (!dev_cfg) {
		SNET_WARN(pdev, "Failed to find a VF config..\n");
		return -ENODEV;
	}

	/* Which PCI device should allocate the IRQs?
	 * If the SNET_CFG_FLAG_IRQ_PF flag is set, the PF device allocates the IRQs
	 */
	pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF);

	ret = pcim_enable_device(pdev);
	if (ret) {
		SNET_ERR(pdev, "Failed to enable PCI VF device\n");
		return ret;
	}

	/* Request for MSI-X IRQs */
	if (!pf_irqs) {
		ret = snet_alloc_irq_vector(pdev, dev_cfg);
		if (ret)
			return ret;
	}

	/* Allocate vdpa device */
	snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL,
				 false);
	if (!snet) {
		SNET_ERR(pdev, "Failed to allocate a vdpa device\n");
		ret = -ENOMEM;
		goto free_irqs;
	}

	/* Init control mutex and spinlock */
	mutex_init(&snet->ctrl_lock);
	spin_lock_init(&snet->ctrl_spinlock);

	/* Save pci device pointer */
	snet->pdev = pdev;
	snet->psnet = psnet;
	snet->cfg = dev_cfg;
	snet->dpu_ready = false;
	snet->sid = vfid;
	/* Reset IRQ value */
	snet->cfg_irq = -1;

	ret = snet_open_vf_bar(pdev, snet);
	if (ret)
		goto put_device;

	/* Create a VirtIO config pointer */
	snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off;

	/* Clear control registers */
	snet_ctrl_clear(snet);

	pci_set_master(pdev);
	pci_set_drvdata(pdev, snet);

	ret = snet_build_vqs(snet);
	if (ret)
		goto put_device;

	/* Reserve IRQ indexes,
	 * The IRQs may be requested and freed multiple times,
	 * but the indexes won't change.
	 */
	snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet);

	snet->vdpa.dma_dev = &pdev->dev;

	/* Register VDPA device */
	ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num);
	if (ret) {
		SNET_ERR(pdev, "Failed to register vdpa device\n");
		goto free_vqs;
	}

	return 0;

free_vqs:
	snet_free_vqs(snet);
put_device:
	put_device(&snet->vdpa.dev);
free_irqs:
	if (!pf_irqs)
		pci_free_irq_vectors(pdev);
	return ret;
}

static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return snet_vdpa_probe_vf(pdev);
	else
		return snet_vdpa_probe_pf(pdev);
}

static void snet_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct psnet *psnet = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);
	/* If IRQs are allocated from the PF, we should free the IRQs */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);

	snet_free_cfg(&psnet->cfg);
	kfree(psnet);
}

static void snet_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct snet *snet = pci_get_drvdata(pdev);
	struct psnet *psnet = snet->psnet;

	vdpa_unregister_device(&snet->vdpa);
	snet_free_vqs(snet);
	/* If IRQs are allocated from the VF, we should free the IRQs */
	if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pci_free_irq_vectors(pdev);
}

static void snet_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		snet_vdpa_remove_vf(pdev);
	else
		snet_vdpa_remove_pf(pdev);
}

static struct pci_device_id snet_driver_pci_ids[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID,
			 PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids);

static struct pci_driver snet_vdpa_driver = {
	.name		= "snet-vdpa-driver",
	.id_table	= snet_driver_pci_ids,
	.probe		= snet_vdpa_probe,
	.remove		= snet_vdpa_remove,
};

module_pci_driver(snet_vdpa_driver);

MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>");
MODULE_DESCRIPTION("SolidRun vDPA driver");
MODULE_LICENSE("GPL v2");