// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include "octep_vdpa.h"

#define OCTEP_VDPA_DRIVER_NAME "octep_vdpa"
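
/*
 * This driver binds to both the Octeon endpoint PF and its VFs.  The PF
 * probe path carves the VF register window out of its own BAR4 resource
 * and hands one slice to each VF (see octep_vdpa_assign_barspace()),
 * while the VF probe path exposes its slice as a vDPA management device
 * once the ready signature appears in its mailbox BAR (posted after the
 * PF enables SR-IOV).
 */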
struct octep_pf {
	u8 __iomem *base[PCI_STD_NUM_BARS];
	struct pci_dev *pdev;
	struct resource res;
	u64 vf_base;
	int enabled_vfs;
	u32 vf_stride;
	u16 vf_devid;
};

struct octep_vdpa {
	struct vdpa_device vdpa;
	struct octep_hw *oct_hw;
	struct pci_dev *pdev;
};

struct octep_vdpa_mgmt_dev {
	struct vdpa_mgmt_dev mdev;
	struct octep_hw oct_hw;
	struct pci_dev *pdev;
	/* Work entry to handle device setup */
	struct work_struct setup_task;
	/* Device status */
	atomic_t status;
};
static struct octep_hw *vdpa_to_octep_hw(struct vdpa_device *vdpa_dev)
{
	struct octep_vdpa *oct_vdpa;

	oct_vdpa = container_of(vdpa_dev, struct octep_vdpa, vdpa);

	return oct_vdpa->oct_hw;
}
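
/*
 * The VF exposes a single MSI-X vector, so one handler services every
 * virtqueue: it scans each vring's notification word, acknowledges it by
 * writing zero and then invokes the callback registered for that queue.
 */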
static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
{
	struct octep_hw *oct_hw = data;
	int i;

	for (i = 0; i < oct_hw->nr_vring; i++) {
		if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
			/* Acknowledge the per queue notification to the device */
			iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
			oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
		}
	}

	return IRQ_HANDLED;
}
static void octep_free_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;

	if (oct_hw->irq != -1) {
		devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
		oct_hw->irq = -1;
	}
	pci_free_irq_vectors(pdev);
}

static int octep_request_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;
	int ret, irq;

	/* Currently HW device provisions one IRQ per VF, hence
	 * allocate one IRQ for all virtqueues call interface.
	 */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to alloc msix vector");
		return ret;
	}

	snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
		 OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));

	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
			       oct_hw->vqs->msix_name, oct_hw);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register interrupt handler\n");
		goto free_irq_vec;
	}
	oct_hw->irq = irq;

	return 0;

free_irq_vec:
	pci_free_irq_vectors(pdev);
	return ret;
}
static u64 octep_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->features;
}

static int octep_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	int ret;

	pr_debug("Driver Features: %llx\n", features);

	ret = octep_verify_features(features);
	if (ret) {
		dev_warn(&oct_hw->pdev->dev,
			 "Must negotiate minimum features 0x%llx for this device",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) |
			 BIT_ULL(VIRTIO_F_RING_PACKED));
		return ret;
	}
	octep_hw_set_drv_features(oct_hw, features);

	return 0;
}

static u64 octep_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_drv_features(oct_hw);
}

static u8 octep_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_status(oct_hw);
}

static void octep_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status_old;

	status_old = octep_hw_get_status(oct_hw);
	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		if (octep_request_irqs(oct_hw))
			status = status_old | VIRTIO_CONFIG_S_FAILED;
	}
	octep_hw_set_status(oct_hw, status);
}
static int octep_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status = octep_hw_get_status(oct_hw);
	u16 qid;

	if (status == 0)
		return 0;

	for (qid = 0; qid < oct_hw->nr_vring; qid++) {
		oct_hw->vqs[qid].cb.callback = NULL;
		oct_hw->vqs[qid].cb.private = NULL;
		oct_hw->config_cb.callback = NULL;
		oct_hw->config_cb.private = NULL;
	}

	octep_hw_reset(oct_hw);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		octep_free_irqs(oct_hw);

	return 0;
}
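
/*
 * The virtqueue ops below are thin wrappers around the octep_hw helpers
 * declared in octep_vdpa.h; all register access details live in that layer.
 */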
static u16 octep_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_size(oct_hw);
}

static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_state(oct_hw, qid, state);
}

static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_set_vq_state(oct_hw, qid, state);
}

static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->vqs[qid].cb = *cb;
}

static void octep_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_ready(oct_hw, qid, ready);
}

static bool octep_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_ready(oct_hw, qid);
}

static void octep_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_num(oct_hw, qid, num);
}

static int octep_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, u64 desc_area,
				     u64 driver_area, u64 device_area)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	pr_debug("qid[%d]: desc_area: %llx\n", qid, desc_area);
	pr_debug("qid[%d]: driver_area: %llx\n", qid, driver_area);
	pr_debug("qid[%d]: device_area: %llx\n\n", qid, device_area);

	return octep_set_vq_address(oct_hw, qid, desc_area, driver_area, device_area);
}
static void octep_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	/* Not supported */
}

static void octep_vdpa_kick_vq_with_data(struct vdpa_device *vdpa_dev, u32 data)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u16 idx = data & 0xFFFF;

	vp_iowrite32(data, oct_hw->vqs[idx].notify_addr);
}
static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return vp_ioread8(&oct_hw->common_cfg->config_generation);
}

static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_CAVIUM;
}

static u32 octep_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PAGE_SIZE;
}

static size_t octep_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->config_size;
}
static void octep_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf,
				  unsigned int len)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_read_dev_config(oct_hw, offset, buf, len);
}

static void octep_vdpa_set_config(struct vdpa_device *vdpa_dev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	/* Not supported */
}

static void octep_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->config_cb.callback = cb->callback;
	oct_hw->config_cb.private = cb->private;
}

static struct vdpa_notification_area octep_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = oct_hw->vqs[idx].notify_pa;
	area.size = PAGE_SIZE;

	return area;
}
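
/*
 * This ops table is handed to vdpa_alloc_device() in octep_vdpa_dev_add()
 * below; the vDPA core invokes these callbacks on behalf of whichever bus
 * driver (e.g. virtio_vdpa or vhost_vdpa) binds to the device.
 */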
static struct vdpa_config_ops octep_vdpa_ops = {
	.get_device_features = octep_vdpa_get_device_features,
	.set_driver_features = octep_vdpa_set_driver_features,
	.get_driver_features = octep_vdpa_get_driver_features,
	.get_status = octep_vdpa_get_status,
	.set_status = octep_vdpa_set_status,
	.reset = octep_vdpa_reset,
	.get_vq_num_max = octep_vdpa_get_vq_num_max,
	.get_vq_state = octep_vdpa_get_vq_state,
	.set_vq_state = octep_vdpa_set_vq_state,
	.set_vq_cb = octep_vdpa_set_vq_cb,
	.set_vq_ready = octep_vdpa_set_vq_ready,
	.get_vq_ready = octep_vdpa_get_vq_ready,
	.set_vq_num = octep_vdpa_set_vq_num,
	.set_vq_address = octep_vdpa_set_vq_address,
	.kick_vq = octep_vdpa_kick_vq,
	.kick_vq_with_data = octep_vdpa_kick_vq_with_data,
	.get_generation = octep_vdpa_get_generation,
	.get_device_id = octep_vdpa_get_device_id,
	.get_vendor_id = octep_vdpa_get_vendor_id,
	.get_vq_align = octep_vdpa_get_vq_align,
	.get_config_size = octep_vdpa_get_config_size,
	.get_config = octep_vdpa_get_config,
	.set_config = octep_vdpa_set_config,
	.set_config_cb = octep_vdpa_set_config_cb,
	.get_vq_notification = octep_get_vq_notification,
};
static int octep_iomap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	int ret;

	ret = pci_request_region(pdev, bar, OCTEP_VDPA_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request BAR:%u region\n", bar);
		return ret;
	}

	tbl[bar] = pci_iomap(pdev, bar, pci_resource_len(pdev, bar));
	if (!tbl[bar]) {
		dev_err(&pdev->dev, "Failed to iomap BAR:%u\n", bar);
		pci_release_region(pdev, bar);
		ret = -ENOMEM;
	}

	return ret;
}

static void octep_iounmap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	pci_iounmap(pdev, tbl[bar]);
	pci_release_region(pdev, bar);
}
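
/*
 * BAR4 of the PF covers the register space of all VFs.  At PF setup the
 * resource is shrunk to an empty range (its original extent is saved in
 * octpf->res) so that the window can instead be parcelled out per VF in
 * octep_vdpa_assign_barspace().  On PF removal the saved range is
 * restored by octep_vdpa_pf_bar_expand().
 */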
static void octep_vdpa_pf_bar_shrink(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	octpf->res.start = res->start;
	octpf->res.end = res->end;
	octpf->vf_base = res->start;

	bus_region.start = res->start;
	bus_region.end = res->start - 1;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_pf_bar_expand(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	bus_region.start = octpf->res.start;
	bus_region.end = octpf->res.end;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct octep_pf *octpf = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (octpf->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_CAPS_BAR);

	if (octpf->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_pf_bar_expand(octpf);
}

static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
{
	struct resource *vf_res = pdev->resource + PCI_STD_RESOURCES + 4;

	memset(vf_res, 0, sizeof(*vf_res));
}
static void octep_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = pci_get_drvdata(pdev);
	struct octep_hw *oct_hw;
	int status;

	oct_hw = &mgmt_dev->oct_hw;
	status = atomic_read(&mgmt_dev->status);
	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_UNINIT);

	cancel_work_sync(&mgmt_dev->setup_task);
	if (status == OCTEP_VDPA_DEV_STATUS_READY)
		vdpa_mgmtdev_unregister(&mgmt_dev->mdev);

	if (oct_hw->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);

	if (oct_hw->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_vf_bar_shrink(pdev);
}

static void octep_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		octep_vdpa_remove_vf(pdev);
	else
		octep_vdpa_remove_pf(pdev);
}
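
/*
 * Management-device "add" callback: allocates the per-device octep_vdpa
 * wrapper, applies any features provisioned through VDPA_ATTR_DEV_FEATURES
 * (rejecting bits the hardware does not offer), and registers the device
 * on the vDPA bus with one virtqueue group and one address space.
 */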
static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(mdev, struct octep_vdpa_mgmt_dev, mdev);
	struct octep_hw *oct_hw = &mgmt_dev->oct_hw;
	struct pci_dev *pdev = oct_hw->pdev;
	struct vdpa_device *vdpa_dev;
	struct octep_vdpa *oct_vdpa;
	u64 device_features;
	int ret;

	oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
				     NULL, false);
	if (IS_ERR(oct_vdpa)) {
		dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device");
		return PTR_ERR(oct_vdpa);
	}

	oct_vdpa->pdev = pdev;
	oct_vdpa->vdpa.dma_dev = &pdev->dev;
	oct_vdpa->vdpa.mdev = mdev;
	oct_vdpa->oct_hw = oct_hw;
	vdpa_dev = &oct_vdpa->vdpa;

	device_features = oct_hw->features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			dev_err(&pdev->dev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				config->device_features, device_features);
			ret = -EINVAL;
			goto vdpa_dev_put;
		}
		device_features &= config->device_features;
	}

	oct_hw->features = device_features;
	dev_info(&pdev->dev, "Vdpa management device features : %llx\n", device_features);

	ret = octep_verify_features(device_features);
	if (ret) {
		dev_warn(mdev->device,
			 "Must provision minimum features 0x%llx for this device",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			 BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) | BIT_ULL(VIRTIO_F_RING_PACKED));
		goto vdpa_dev_put;
	}

	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&oct_vdpa->vdpa, oct_hw->nr_vring);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vDPA bus");
		goto vdpa_dev_put;
	}

	return 0;

vdpa_dev_put:
	put_device(&oct_vdpa->vdpa.dev);
	return ret;
}
static void octep_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *vdpa_dev)
{
	_vdpa_unregister_device(vdpa_dev);
}

static const struct vdpa_mgmtdev_ops octep_vdpa_mgmt_dev_ops = {
	.dev_add = octep_vdpa_dev_add,
	.dev_del = octep_vdpa_dev_del,
};
static bool get_device_ready_status(u8 __iomem *addr)
{
	u64 signature = readq(addr + OCTEP_VF_MBOX_DATA(0));

	if (signature == OCTEP_DEV_READY_SIGNATURE) {
		writeq(0, addr + OCTEP_VF_MBOX_DATA(0));
		return true;
	}

	return false;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
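
/*
 * Deferred VF setup: poll the mailbox BAR for the ready signature for up
 * to five seconds, then map the capabilities BAR, read the device
 * capabilities and register the vDPA management device.  The CAPS BAR is
 * unmapped again if any step after the iomap fails.
 */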
static void octep_vdpa_setup_task(struct work_struct *work)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(work, struct octep_vdpa_mgmt_dev,
							    setup_task);
	struct pci_dev *pdev = mgmt_dev->pdev;
	struct device *dev = &pdev->dev;
	struct octep_hw *oct_hw;
	unsigned long timeout;
	int ret;

	oct_hw = &mgmt_dev->oct_hw;

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_WAIT_FOR_BAR_INIT);

	/* Wait for a maximum of 5 sec */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (!time_after(jiffies, timeout)) {
		if (get_device_ready_status(oct_hw->base[OCTEP_HW_MBOX_BAR])) {
			atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_INIT);
			break;
		}

		if (atomic_read(&mgmt_dev->status) >= OCTEP_VDPA_DEV_STATUS_READY) {
			dev_info(dev, "Stopping vDPA setup task.\n");
			return;
		}

		usleep_range(1000, 1500);
	}

	if (atomic_read(&mgmt_dev->status) != OCTEP_VDPA_DEV_STATUS_INIT) {
		dev_err(dev, "BAR initialization is timed out\n");
		return;
	}

	ret = octep_iomap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	if (ret)
		return;

	ret = octep_hw_caps_read(oct_hw, pdev);
	if (ret < 0)
		goto unmap_region;

	mgmt_dev->mdev.ops = &octep_vdpa_mgmt_dev_ops;
	mgmt_dev->mdev.id_table = id_table;
	mgmt_dev->mdev.max_supported_vqs = oct_hw->nr_vring;
	mgmt_dev->mdev.supported_features = oct_hw->features;
	mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	mgmt_dev->mdev.device = dev;

	ret = vdpa_mgmtdev_register(&mgmt_dev->mdev);
	if (ret) {
		dev_err(dev, "Failed to register vdpa management interface\n");
		goto unmap_region;
	}

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_READY);

	return;

unmap_region:
	octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	oct_hw->base[OCTEP_HW_CAPS_BAR] = NULL;
}
static int octep_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	pci_set_master(pdev);

	mgmt_dev = devm_kzalloc(dev, sizeof(struct octep_vdpa_mgmt_dev), GFP_KERNEL);
	if (!mgmt_dev)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, mgmt_dev->oct_hw.base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	mgmt_dev->pdev = pdev;
	pci_set_drvdata(pdev, mgmt_dev);

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_ALLOC);
	INIT_WORK(&mgmt_dev->setup_task, octep_vdpa_setup_task);
	schedule_work(&mgmt_dev->setup_task);
	dev_info(&pdev->dev, "octep vdpa mgmt device setup task is queued\n");

	return 0;
}
static void octep_vdpa_assign_barspace(struct pci_dev *vf_dev, struct pci_dev *pf_dev, u8 idx)
{
	struct resource *vf_res = vf_dev->resource + PCI_STD_RESOURCES + 4;
	struct resource *pf_res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct octep_pf *pf = pci_get_drvdata(pf_dev);
	struct pci_bus_region bus_region;

	vf_res->name = pci_name(vf_dev);
	vf_res->flags = pf_res->flags;
	vf_res->parent = (pf_dev->resource + PCI_STD_RESOURCES)->parent;

	bus_region.start = pf->vf_base + idx * pf->vf_stride;
	bus_region.end = bus_region.start + pf->vf_stride - 1;
	pcibios_bus_to_resource(vf_dev->bus, vf_res, &bus_region);
}
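
/*
 * SR-IOV enable: after the VFs are created, walk the matching VF devices,
 * assign each one its BAR4 slice and finally post the ready signature for
 * each VF through the mailbox registers so the VF probe path
 * (octep_vdpa_setup_task) can proceed.
 */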
static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);
	u8 __iomem *addr = pf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *vf_pdev = NULL;
	bool done = false;
	int index = 0;
	int ret, i;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	pf->enabled_vfs = num_vfs;

	while ((vf_pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, vf_pdev))) {
		if (vf_pdev->device != pf->vf_devid)
			continue;

		octep_vdpa_assign_barspace(vf_pdev, pdev, index);
		if (++index == num_vfs) {
			done = true;
			break;
		}
	}

	if (done) {
		for (i = 0; i < pf->enabled_vfs; i++)
			writeq(OCTEP_DEV_READY_SIGNATURE, addr + OCTEP_PF_MBOX_DATA(i));
	}

	return num_vfs;
}

static int octep_sriov_disable(struct pci_dev *pdev)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);

	if (!pci_num_vf(pdev))
		return 0;

	pci_disable_sriov(pdev);
	pf->enabled_vfs = 0;

	return 0;
}

static int octep_vdpa_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return octep_sriov_enable(pdev, num_vfs);
	else
		return octep_sriov_disable(pdev);
}
static u16 octep_get_vf_devid(struct pci_dev *pdev)
{
	u16 did;

	switch (pdev->device) {
	case OCTEP_VDPA_DEVID_CN106K_PF:
		did = OCTEP_VDPA_DEVID_CN106K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN105K_PF:
		did = OCTEP_VDPA_DEVID_CN105K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN103K_PF:
		did = OCTEP_VDPA_DEVID_CN103K_VF;
		break;
	default:
		did = 0xFFFF;
		break;
	}

	return did;
}
static int octep_vdpa_pf_setup(struct octep_pf *octpf)
{
	u8 __iomem *addr = octpf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *pdev = octpf->pdev;
	int totalvfs;
	size_t len;
	u64 val;

	totalvfs = pci_sriov_get_totalvfs(pdev);
	if (unlikely(!totalvfs)) {
		dev_info(&pdev->dev, "Total VFs are %d in PF sriov configuration\n", totalvfs);
		return 0;
	}

	addr = octpf->base[OCTEP_HW_MBOX_BAR];
	val = readq(addr + OCTEP_EPF_RINFO(0));
	if (val == 0) {
		dev_err(&pdev->dev, "Invalid device configuration\n");
		return -EINVAL;
	}

	if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
		val &= ~GENMASK_ULL(35, 32);
		val |= BIT_ULL(32);
		writeq(val, addr + OCTEP_EPF_RINFO(0));
	}

	len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);

	octpf->vf_stride = len / totalvfs;
	octpf->vf_devid = octep_get_vf_devid(pdev);

	octep_vdpa_pf_bar_shrink(octpf);

	return 0;
}
static int octep_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct octep_pf *octpf;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
	if (!octpf)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	pci_set_master(pdev);
	pci_set_drvdata(pdev, octpf);
	octpf->pdev = pdev;

	ret = octep_vdpa_pf_setup(octpf);
	if (ret)
		goto unmap_region;

	return 0;

unmap_region:
	octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	return ret;
}
static int octep_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return octep_vdpa_probe_vf(pdev);
	else
		return octep_vdpa_probe_pf(pdev);
}
static struct pci_device_id octep_pci_vdpa_map[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_VF) },
	{ 0 },
};

static struct pci_driver octep_pci_vdpa = {
	.name = OCTEP_VDPA_DRIVER_NAME,
	.id_table = octep_pci_vdpa_map,
	.probe = octep_vdpa_probe,
	.remove = octep_vdpa_remove,
	.sriov_configure = octep_vdpa_sriov_configure,
};

module_pci_driver(octep_pci_vdpa);
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell Octeon PCIe endpoint vDPA driver");
MODULE_LICENSE("GPL");