// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2024 Marvell. */

#include <linux/interrupt.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/iommu.h>
#include "octep_vdpa.h"

#define OCTEP_VDPA_DRIVER_NAME "octep_vdpa"

struct octep_pf {
	u8 __iomem *base[PCI_STD_NUM_BARS];
	struct pci_dev *pdev;
	struct resource res;
	u64 vf_base;
	int enabled_vfs;
	u32 vf_stride;
	u16 vf_devid;
};

struct octep_vdpa {
	struct vdpa_device vdpa;
	struct octep_hw *oct_hw;
	struct pci_dev *pdev;
};

struct octep_vdpa_mgmt_dev {
	struct vdpa_mgmt_dev mdev;
	struct octep_hw oct_hw;
	struct pci_dev *pdev;
	/* Work entry to handle device setup */
	struct work_struct setup_task;
	/* Device status */
	atomic_t status;
};

static struct octep_hw *vdpa_to_octep_hw(struct vdpa_device *vdpa_dev)
{
	struct octep_vdpa *oct_vdpa;

	oct_vdpa = container_of(vdpa_dev, struct octep_vdpa, vdpa);

	return oct_vdpa->oct_hw;
}

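/*
 * The VF is provisioned with a single MSI-X vector shared by all virtqueues
 * (see octep_request_irqs()), so the handler scans every ring's per-queue
 * notification word to discover which queue(s) actually fired.
 */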
static irqreturn_t octep_vdpa_intr_handler(int irq, void *data)
{
	struct octep_hw *oct_hw = data;
	int i;

	for (i = 0; i < oct_hw->nr_vring; i++) {
		if (oct_hw->vqs[i].cb.callback && ioread32(oct_hw->vqs[i].cb_notify_addr)) {
			/* Acknowledge the per queue notification to the device */
			iowrite32(0, oct_hw->vqs[i].cb_notify_addr);
			oct_hw->vqs[i].cb.callback(oct_hw->vqs[i].cb.private);
		}
	}

	return IRQ_HANDLED;
}

static void octep_free_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;

	if (oct_hw->irq != -1) {
		devm_free_irq(&pdev->dev, oct_hw->irq, oct_hw);
		oct_hw->irq = -1;
	}
	pci_free_irq_vectors(pdev);
}

static int octep_request_irqs(struct octep_hw *oct_hw)
{
	struct pci_dev *pdev = oct_hw->pdev;
	int ret, irq;

	/* The HW device currently provisions one IRQ per VF, hence
	 * allocate one IRQ for all virtqueues' call interface.
	 */
	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to alloc msix vector\n");
		return ret;
	}

	snprintf(oct_hw->vqs->msix_name, sizeof(oct_hw->vqs->msix_name),
		 OCTEP_VDPA_DRIVER_NAME "-vf-%d", pci_iov_vf_id(pdev));

	irq = pci_irq_vector(pdev, 0);
	ret = devm_request_irq(&pdev->dev, irq, octep_vdpa_intr_handler, 0,
			       oct_hw->vqs->msix_name, oct_hw);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register interrupt handler\n");
		goto free_irq_vec;
	}
	oct_hw->irq = irq;

	return 0;

free_irq_vec:
	pci_free_irq_vectors(pdev);
	return ret;
}

static u64 octep_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->features;
}

static int octep_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	int ret;

	pr_debug("Driver Features: %llx\n", features);

	ret = octep_verify_features(features);
	if (ret) {
		dev_warn(&oct_hw->pdev->dev,
			 "Must negotiate minimum features 0x%llx for this device",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) |
			 BIT_ULL(VIRTIO_F_RING_PACKED));
		return ret;
	}
	octep_hw_set_drv_features(oct_hw, features);

	return 0;
}

static u64 octep_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_drv_features(oct_hw);
}

static u8 octep_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_hw_get_status(oct_hw);
}

static void octep_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status_old;

	status_old = octep_hw_get_status(oct_hw);

	if (status_old == status)
		return;

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		if (octep_request_irqs(oct_hw))
			status = status_old | VIRTIO_CONFIG_S_FAILED;
	}
	octep_hw_set_status(oct_hw, status);
}

static int octep_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u8 status = octep_hw_get_status(oct_hw);
	u16 qid;

	if (status == 0)
		return 0;

	for (qid = 0; qid < oct_hw->nr_vring; qid++) {
		oct_hw->vqs[qid].cb.callback = NULL;
		oct_hw->vqs[qid].cb.private = NULL;
		oct_hw->config_cb.callback = NULL;
		oct_hw->config_cb.private = NULL;
	}
	octep_hw_reset(oct_hw);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK)
		octep_free_irqs(oct_hw);

	return 0;
}

static u16 octep_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_size(oct_hw);
}

static int octep_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_state(oct_hw, qid, state);
}

static int octep_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_set_vq_state(oct_hw, qid, state);
}

static void octep_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->vqs[qid].cb = *cb;
}

static void octep_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_ready(oct_hw, qid, ready);
}

static bool octep_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return octep_get_vq_ready(oct_hw, qid);
}

static void octep_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_set_vq_num(oct_hw, qid, num);
}

static int octep_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, u64 desc_area,
				     u64 driver_area, u64 device_area)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	pr_debug("qid[%d]: desc_area: %llx\n", qid, desc_area);
	pr_debug("qid[%d]: driver_area: %llx\n", qid, driver_area);
	pr_debug("qid[%d]: device_area: %llx\n\n", qid, device_area);

	return octep_set_vq_address(oct_hw, qid, desc_area, driver_area, device_area);
}

static void octep_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	/* Not supported */
}

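/*
 * Plain kicks are not supported: the device relies on
 * VIRTIO_F_NOTIFICATION_DATA, where the 32-bit notification value carries
 * the vq index in bits 15:0 and the next avail index (plus the wrap counter
 * for packed rings) in bits 31:16, so the whole word is written to the
 * queue's notify address.
 */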
static void octep_vdpa_kick_vq_with_data(struct vdpa_device *vdpa_dev, u32 data)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	u16 idx = data & 0xFFFF;

	vp_iowrite32(data, oct_hw->vqs[idx].notify_addr);
}

static u32 octep_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return vp_ioread8(&oct_hw->common_cfg->config_generation);
}

static u32 octep_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 octep_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_CAVIUM;
}

static u32 octep_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PAGE_SIZE;
}

static size_t octep_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	return oct_hw->config_size;
}

static void octep_vdpa_get_config(struct vdpa_device *vdpa_dev, unsigned int offset, void *buf,
				  unsigned int len)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	octep_read_dev_config(oct_hw, offset, buf, len);
}

static void octep_vdpa_set_config(struct vdpa_device *vdpa_dev, unsigned int offset,
				  const void *buf, unsigned int len)
{
	/* Not supported */
}

static void octep_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, struct vdpa_callback *cb)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);

	oct_hw->config_cb.callback = cb->callback;
	oct_hw->config_cb.private = cb->private;
}

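/*
 * Report the queue doorbell as a page-sized physical region so that callers
 * (e.g. a userspace driver via vhost-vDPA) can map it and notify the device
 * directly, without bouncing kicks through the kernel.
 */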
static struct vdpa_notification_area octep_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct octep_hw *oct_hw = vdpa_to_octep_hw(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = oct_hw->vqs[idx].notify_pa;
	area.size = PAGE_SIZE;

	return area;
}

static struct vdpa_config_ops octep_vdpa_ops = {
	.get_device_features = octep_vdpa_get_device_features,
	.set_driver_features = octep_vdpa_set_driver_features,
	.get_driver_features = octep_vdpa_get_driver_features,
	.get_status	= octep_vdpa_get_status,
	.set_status	= octep_vdpa_set_status,
	.reset		= octep_vdpa_reset,
	.get_vq_num_max	= octep_vdpa_get_vq_num_max,
	.get_vq_state	= octep_vdpa_get_vq_state,
	.set_vq_state	= octep_vdpa_set_vq_state,
	.set_vq_cb	= octep_vdpa_set_vq_cb,
	.set_vq_ready	= octep_vdpa_set_vq_ready,
	.get_vq_ready	= octep_vdpa_get_vq_ready,
	.set_vq_num	= octep_vdpa_set_vq_num,
	.set_vq_address	= octep_vdpa_set_vq_address,
	.get_vq_irq	= NULL,
	.kick_vq	= octep_vdpa_kick_vq,
	.kick_vq_with_data	= octep_vdpa_kick_vq_with_data,
	.get_generation	= octep_vdpa_get_generation,
	.get_device_id	= octep_vdpa_get_device_id,
	.get_vendor_id	= octep_vdpa_get_vendor_id,
	.get_vq_align	= octep_vdpa_get_vq_align,
	.get_config_size	= octep_vdpa_get_config_size,
	.get_config	= octep_vdpa_get_config,
	.set_config	= octep_vdpa_set_config,
	.set_config_cb  = octep_vdpa_set_config_cb,
	.get_vq_notification = octep_get_vq_notification,
};

static int octep_iomap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	int ret;

	ret = pci_request_region(pdev, bar, OCTEP_VDPA_DRIVER_NAME);
	if (ret) {
		dev_err(&pdev->dev, "Failed to request BAR:%u region\n", bar);
		return ret;
	}

	tbl[bar] = pci_iomap(pdev, bar, pci_resource_len(pdev, bar));
	if (!tbl[bar]) {
		dev_err(&pdev->dev, "Failed to iomap BAR:%u\n", bar);
		pci_release_region(pdev, bar);
		ret = -ENOMEM;
	}

	return ret;
}

static void octep_iounmap_region(struct pci_dev *pdev, u8 __iomem **tbl, u8 bar)
{
	pci_iounmap(pdev, tbl[bar]);
	pci_release_region(pdev, bar);
}

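/*
 * Shrink the PF's BAR4 resource to zero size (end = start - 1) after saving
 * its original extent in octpf->res. The vacated window is later carved into
 * per-VF regions by octep_vdpa_assign_barspace().
 */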
static void octep_vdpa_pf_bar_shrink(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	octpf->res.start = res->start;
	octpf->res.end = res->end;
	octpf->vf_base = res->start;

	bus_region.start = res->start;
	bus_region.end = res->start - 1;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

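/* Restore the BAR4 extent saved by octep_vdpa_pf_bar_shrink() on removal. */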
static void octep_vdpa_pf_bar_expand(struct octep_pf *octpf)
{
	struct pci_dev *pf_dev = octpf->pdev;
	struct resource *res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct pci_bus_region bus_region;

	bus_region.start = octpf->res.start;
	bus_region.end = octpf->res.end;

	pcibios_bus_to_resource(pf_dev->bus, res, &bus_region);
}

static void octep_vdpa_remove_pf(struct pci_dev *pdev)
{
	struct octep_pf *octpf = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (octpf->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_CAPS_BAR);

	if (octpf->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_pf_bar_expand(octpf);
}

static void octep_vdpa_vf_bar_shrink(struct pci_dev *pdev)
{
	struct resource *vf_res = pdev->resource + PCI_STD_RESOURCES + 4;

	memset(vf_res, 0, sizeof(*vf_res));
}

static void octep_vdpa_remove_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = pci_get_drvdata(pdev);
	struct octep_hw *oct_hw;
	int status;

	oct_hw = &mgmt_dev->oct_hw;
	status = atomic_read(&mgmt_dev->status);
	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_UNINIT);

	cancel_work_sync(&mgmt_dev->setup_task);
	if (status == OCTEP_VDPA_DEV_STATUS_READY)
		vdpa_mgmtdev_unregister(&mgmt_dev->mdev);

	if (oct_hw->base[OCTEP_HW_CAPS_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);

	if (oct_hw->base[OCTEP_HW_MBOX_BAR])
		octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_MBOX_BAR);

	octep_vdpa_vf_bar_shrink(pdev);
}

static void octep_vdpa_remove(struct pci_dev *pdev)
{
	if (pdev->is_virtfn)
		octep_vdpa_remove_vf(pdev);
	else
		octep_vdpa_remove_pf(pdev);
}

static int octep_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			      const struct vdpa_dev_set_config *config)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(mdev, struct octep_vdpa_mgmt_dev, mdev);
	struct octep_hw *oct_hw = &mgmt_dev->oct_hw;
	struct pci_dev *pdev = oct_hw->pdev;
	struct vdpa_device *vdpa_dev;
	struct octep_vdpa *oct_vdpa;
	u64 device_features;
	int ret;

	oct_vdpa = vdpa_alloc_device(struct octep_vdpa, vdpa, &pdev->dev, &octep_vdpa_ops, 1, 1,
				     NULL, false);
	if (IS_ERR(oct_vdpa)) {
		dev_err(&pdev->dev, "Failed to allocate vDPA structure for octep vdpa device");
		return PTR_ERR(oct_vdpa);
	}

	oct_vdpa->pdev = pdev;
	oct_vdpa->vdpa.dma_dev = &pdev->dev;
	oct_vdpa->vdpa.mdev = mdev;
	oct_vdpa->oct_hw = oct_hw;
	vdpa_dev = &oct_vdpa->vdpa;

	device_features = oct_hw->features;
	if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		if (config->device_features & ~device_features) {
			dev_err(&pdev->dev, "The provisioned features 0x%llx are not supported by this device with features 0x%llx\n",
				config->device_features, device_features);
			ret = -EINVAL;
			goto vdpa_dev_put;
		}
		device_features &= config->device_features;
	}

	oct_hw->features = device_features;
	dev_info(&pdev->dev, "Vdpa management device features : %llx\n", device_features);

	ret = octep_verify_features(device_features);
	if (ret) {
		dev_warn(mdev->device,
			 "Must provision minimum features 0x%llx for this device",
			 BIT_ULL(VIRTIO_F_VERSION_1) | BIT_ULL(VIRTIO_F_ACCESS_PLATFORM) |
			 BIT_ULL(VIRTIO_F_NOTIFICATION_DATA) | BIT_ULL(VIRTIO_F_RING_PACKED));
		goto vdpa_dev_put;
	}
	if (name)
		ret = dev_set_name(&vdpa_dev->dev, "%s", name);
	else
		ret = dev_set_name(&vdpa_dev->dev, "vdpa%u", vdpa_dev->index);

	ret = _vdpa_register_device(&oct_vdpa->vdpa, oct_hw->nr_vring);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register to vDPA bus");
		goto vdpa_dev_put;
	}
	return 0;

vdpa_dev_put:
	put_device(&oct_vdpa->vdpa.dev);
	return ret;
}

static void octep_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *vdpa_dev)
{
	_vdpa_unregister_device(vdpa_dev);
}

static const struct vdpa_mgmtdev_ops octep_vdpa_mgmt_dev_ops = {
	.dev_add = octep_vdpa_dev_add,
	.dev_del = octep_vdpa_dev_del
};

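/*
 * The PF writes OCTEP_DEV_READY_SIGNATURE into each VF mailbox once the VF's
 * BAR window has been assigned (see octep_sriov_enable()). The VF polls for
 * the signature and clears it to acknowledge.
 */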
static bool get_device_ready_status(u8 __iomem *addr)
{
	u64 signature = readq(addr + OCTEP_VF_MBOX_DATA(0));

	if (signature == OCTEP_DEV_READY_SIGNATURE) {
		writeq(0, addr + OCTEP_VF_MBOX_DATA(0));
		return true;
	}

	return false;
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

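/*
 * VF probe can finish before the PF has assigned BAR space and posted the
 * ready signature, so the remainder of device setup runs from this worker,
 * which polls the mailbox for up to 5 seconds before giving up.
 */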
static void octep_vdpa_setup_task(struct work_struct *work)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev = container_of(work, struct octep_vdpa_mgmt_dev,
							    setup_task);
	struct pci_dev *pdev = mgmt_dev->pdev;
	struct device *dev = &pdev->dev;
	struct octep_hw *oct_hw;
	unsigned long timeout;
	int ret;

	oct_hw = &mgmt_dev->oct_hw;

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_WAIT_FOR_BAR_INIT);

	/* Wait for a maximum of 5 sec */
	timeout = jiffies + msecs_to_jiffies(5000);
	while (!time_after(jiffies, timeout)) {
		if (get_device_ready_status(oct_hw->base[OCTEP_HW_MBOX_BAR])) {
			atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_INIT);
			break;
		}

		if (atomic_read(&mgmt_dev->status) >= OCTEP_VDPA_DEV_STATUS_READY) {
			dev_info(dev, "Stopping vDPA setup task.\n");
			return;
		}

		usleep_range(1000, 1500);
	}

	if (atomic_read(&mgmt_dev->status) != OCTEP_VDPA_DEV_STATUS_INIT) {
		dev_err(dev, "BAR initialization timed out\n");
		return;
	}

	ret = octep_iomap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	if (ret)
		return;

	ret = octep_hw_caps_read(oct_hw, pdev);
	if (ret < 0)
		goto unmap_region;

	mgmt_dev->mdev.ops = &octep_vdpa_mgmt_dev_ops;
	mgmt_dev->mdev.id_table = id_table;
	mgmt_dev->mdev.max_supported_vqs = oct_hw->nr_vring;
	mgmt_dev->mdev.supported_features = oct_hw->features;
	mgmt_dev->mdev.config_attr_mask = (1 << VDPA_ATTR_DEV_FEATURES);
	mgmt_dev->mdev.device = dev;

	ret = vdpa_mgmtdev_register(&mgmt_dev->mdev);
	if (ret) {
		dev_err(dev, "Failed to register vdpa management interface\n");
		goto unmap_region;
	}

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_READY);

	return;

unmap_region:
	octep_iounmap_region(pdev, oct_hw->base, OCTEP_HW_CAPS_BAR);
	oct_hw->base[OCTEP_HW_CAPS_BAR] = NULL;
}

static int octep_vdpa_probe_vf(struct pci_dev *pdev)
{
	struct octep_vdpa_mgmt_dev *mgmt_dev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	pci_set_master(pdev);

	mgmt_dev = devm_kzalloc(dev, sizeof(struct octep_vdpa_mgmt_dev), GFP_KERNEL);
	if (!mgmt_dev)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, mgmt_dev->oct_hw.base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	mgmt_dev->pdev = pdev;
	pci_set_drvdata(pdev, mgmt_dev);

	atomic_set(&mgmt_dev->status, OCTEP_VDPA_DEV_STATUS_ALLOC);
	INIT_WORK(&mgmt_dev->setup_task, octep_vdpa_setup_task);
	schedule_work(&mgmt_dev->setup_task);
	dev_info(&pdev->dev, "octep vdpa mgmt device setup task is queued\n");

	return 0;
}

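/*
 * Hand the VF an equal slice of the PF BAR window saved by
 * octep_vdpa_pf_bar_shrink(): slice idx starts at vf_base + idx * vf_stride
 * and spans vf_stride bytes.
 */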
static void octep_vdpa_assign_barspace(struct pci_dev *vf_dev, struct pci_dev *pf_dev, u8 idx)
{
	struct resource *vf_res = vf_dev->resource + PCI_STD_RESOURCES + 4;
	struct resource *pf_res = pf_dev->resource + PCI_STD_RESOURCES + 4;
	struct octep_pf *pf = pci_get_drvdata(pf_dev);
	struct pci_bus_region bus_region;

	vf_res->name = pci_name(vf_dev);
	vf_res->flags = pf_res->flags;
	vf_res->parent = (pf_dev->resource + PCI_STD_RESOURCES)->parent;

	bus_region.start = pf->vf_base + idx * pf->vf_stride;
	bus_region.end = bus_region.start + pf->vf_stride - 1;
	pcibios_bus_to_resource(vf_dev->bus, vf_res, &bus_region);
}

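/*
 * Assign BAR space to every matching VF first; only once all of them are set
 * up is the ready signature posted to each VF mailbox, letting the VF setup
 * tasks proceed.
 */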
static int octep_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);
	u8 __iomem *addr = pf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *vf_pdev = NULL;
	bool done = false;
	int index = 0;
	int ret, i;

	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret)
		return ret;

	pf->enabled_vfs = num_vfs;

	while ((vf_pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, vf_pdev))) {
		if (vf_pdev->device != pf->vf_devid)
			continue;

		octep_vdpa_assign_barspace(vf_pdev, pdev, index);
		if (++index == num_vfs) {
			done = true;
			break;
		}
	}

	if (done) {
		for (i = 0; i < pf->enabled_vfs; i++)
			writeq(OCTEP_DEV_READY_SIGNATURE, addr + OCTEP_PF_MBOX_DATA(i));
	}

	return num_vfs;
}

static int octep_sriov_disable(struct pci_dev *pdev)
{
	struct octep_pf *pf = pci_get_drvdata(pdev);

	if (!pci_num_vf(pdev))
		return 0;

	pci_disable_sriov(pdev);
	pf->enabled_vfs = 0;

	return 0;
}

static int octep_vdpa_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs > 0)
		return octep_sriov_enable(pdev, num_vfs);
	else
		return octep_sriov_disable(pdev);
}

static u16 octep_get_vf_devid(struct pci_dev *pdev)
{
	u16 did;

	switch (pdev->device) {
	case OCTEP_VDPA_DEVID_CN106K_PF:
		did = OCTEP_VDPA_DEVID_CN106K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN105K_PF:
		did = OCTEP_VDPA_DEVID_CN105K_VF;
		break;
	case OCTEP_VDPA_DEVID_CN103K_PF:
		did = OCTEP_VDPA_DEVID_CN103K_VF;
		break;
	default:
		did = 0xFFFF;
		break;
	}

	return did;
}

static int octep_vdpa_pf_setup(struct octep_pf *octpf)
{
	u8 __iomem *addr = octpf->base[OCTEP_HW_MBOX_BAR];
	struct pci_dev *pdev = octpf->pdev;
	int totalvfs;
	size_t len;
	u64 val;

	totalvfs = pci_sriov_get_totalvfs(pdev);
	if (unlikely(!totalvfs)) {
		dev_info(&pdev->dev, "Total VFs are %d in PF sriov configuration\n", totalvfs);
		return 0;
	}

	val = readq(addr + OCTEP_EPF_RINFO(0));
	if (val == 0) {
		dev_err(&pdev->dev, "Invalid device configuration\n");
		return -EINVAL;
	}

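	/*
	 * RPVF (bits 35:32 of the ring-info register) is presumably the
	 * rings-per-VF count; if the device reports anything other than one,
	 * force it back to a single ring per VF.
	 */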
	if (OCTEP_EPF_RINFO_RPVF(val) != BIT_ULL(0)) {
		val &= ~GENMASK_ULL(35, 32);
		val |= BIT_ULL(32);
		writeq(val, addr + OCTEP_EPF_RINFO(0));
	}

	len = pci_resource_len(pdev, OCTEP_HW_CAPS_BAR);

	octpf->vf_stride = len / totalvfs;
	octpf->vf_devid = octep_get_vf_devid(pdev);

	octep_vdpa_pf_bar_shrink(octpf);

	return 0;
}

static int octep_vdpa_probe_pf(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct octep_pf *octpf;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable device\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "No usable DMA configuration\n");
		return ret;
	}
	octpf = devm_kzalloc(dev, sizeof(*octpf), GFP_KERNEL);
	if (!octpf)
		return -ENOMEM;

	ret = octep_iomap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	if (ret)
		return ret;

	pci_set_master(pdev);
	pci_set_drvdata(pdev, octpf);
	octpf->pdev = pdev;

	ret = octep_vdpa_pf_setup(octpf);
	if (ret)
		goto unmap_region;

	return 0;

unmap_region:
	octep_iounmap_region(pdev, octpf->base, OCTEP_HW_MBOX_BAR);
	return ret;
}

static int octep_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (pdev->is_virtfn)
		return octep_vdpa_probe_vf(pdev);
	else
		return octep_vdpa_probe_pf(pdev);
}

static struct pci_device_id octep_pci_vdpa_map[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN106K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN105K_VF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_VDPA_DEVID_CN103K_VF) },
	{ 0 },
};

static struct pci_driver octep_pci_vdpa = {
	.name     = OCTEP_VDPA_DRIVER_NAME,
	.id_table = octep_pci_vdpa_map,
	.probe    = octep_vdpa_probe,
	.remove   = octep_vdpa_remove,
	.sriov_configure = octep_vdpa_sriov_configure
};

module_pci_driver(octep_pci_vdpa);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell Octeon PCIe endpoint vDPA driver");
MODULE_LICENSE("GPL");