1 // SPDX-License-Identifier: GPL-2.0-only
3 * Intel IFC VF NIC driver for virtio dataplane offloading
5 * Copyright (C) 2020 Intel Corporation.
7 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/sysfs.h>
15 #include "ifcvf_base.h"
17 #define VERSION_STRING "0.1"
18 #define DRIVER_AUTHOR "Intel Corporation"
19 #define IFCVF_DRIVER_NAME "ifcvf"
21 static irqreturn_t
ifcvf_config_changed(int irq
, void *arg
)
23 struct ifcvf_hw
*vf
= arg
;
25 if (vf
->config_cb
.callback
)
26 return vf
->config_cb
.callback(vf
->config_cb
.private);
31 static irqreturn_t
ifcvf_intr_handler(int irq
, void *arg
)
33 struct vring_info
*vring
= arg
;
35 if (vring
->cb
.callback
)
36 return vring
->cb
.callback(vring
->cb
.private);
/* devres action callback: release the PCI MSI-X vectors of @data (a pci_dev). */
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
46 static void ifcvf_free_irq(struct ifcvf_adapter
*adapter
, int queues
)
48 struct pci_dev
*pdev
= adapter
->pdev
;
49 struct ifcvf_hw
*vf
= &adapter
->vf
;
53 for (i
= 0; i
< queues
; i
++)
54 devm_free_irq(&pdev
->dev
, vf
->vring
[i
].irq
, &vf
->vring
[i
]);
56 ifcvf_free_irq_vectors(pdev
);
59 static int ifcvf_request_irq(struct ifcvf_adapter
*adapter
)
61 struct pci_dev
*pdev
= adapter
->pdev
;
62 struct ifcvf_hw
*vf
= &adapter
->vf
;
63 int vector
, i
, ret
, irq
;
65 ret
= pci_alloc_irq_vectors(pdev
, IFCVF_MAX_INTR
,
66 IFCVF_MAX_INTR
, PCI_IRQ_MSIX
);
68 IFCVF_ERR(pdev
, "Failed to alloc IRQ vectors\n");
72 snprintf(vf
->config_msix_name
, 256, "ifcvf[%s]-config\n",
75 irq
= pci_irq_vector(pdev
, vector
);
76 ret
= devm_request_irq(&pdev
->dev
, irq
,
77 ifcvf_config_changed
, 0,
78 vf
->config_msix_name
, vf
);
80 for (i
= 0; i
< IFCVF_MAX_QUEUE_PAIRS
* 2; i
++) {
81 snprintf(vf
->vring
[i
].msix_name
, 256, "ifcvf[%s]-%d\n",
83 vector
= i
+ IFCVF_MSI_QUEUE_OFF
;
84 irq
= pci_irq_vector(pdev
, vector
);
85 ret
= devm_request_irq(&pdev
->dev
, irq
,
86 ifcvf_intr_handler
, 0,
87 vf
->vring
[i
].msix_name
,
91 "Failed to request irq for vq %d\n", i
);
92 ifcvf_free_irq(adapter
, i
);
97 vf
->vring
[i
].irq
= irq
;
103 static int ifcvf_start_datapath(void *private)
105 struct ifcvf_hw
*vf
= ifcvf_private_to_vf(private);
109 vf
->nr_vring
= IFCVF_MAX_QUEUE_PAIRS
* 2;
110 ret
= ifcvf_start_hw(vf
);
112 status
= ifcvf_get_status(vf
);
113 status
|= VIRTIO_CONFIG_S_FAILED
;
114 ifcvf_set_status(vf
, status
);
120 static int ifcvf_stop_datapath(void *private)
122 struct ifcvf_hw
*vf
= ifcvf_private_to_vf(private);
125 for (i
= 0; i
< IFCVF_MAX_QUEUE_PAIRS
* 2; i
++)
126 vf
->vring
[i
].cb
.callback
= NULL
;
133 static void ifcvf_reset_vring(struct ifcvf_adapter
*adapter
)
135 struct ifcvf_hw
*vf
= ifcvf_private_to_vf(adapter
);
138 for (i
= 0; i
< IFCVF_MAX_QUEUE_PAIRS
* 2; i
++) {
139 vf
->vring
[i
].last_avail_idx
= 0;
140 vf
->vring
[i
].desc
= 0;
141 vf
->vring
[i
].avail
= 0;
142 vf
->vring
[i
].used
= 0;
143 vf
->vring
[i
].ready
= 0;
144 vf
->vring
[i
].cb
.callback
= NULL
;
145 vf
->vring
[i
].cb
.private = NULL
;
151 static struct ifcvf_adapter
*vdpa_to_adapter(struct vdpa_device
*vdpa_dev
)
153 return container_of(vdpa_dev
, struct ifcvf_adapter
, vdpa
);
156 static struct ifcvf_hw
*vdpa_to_vf(struct vdpa_device
*vdpa_dev
)
158 struct ifcvf_adapter
*adapter
= vdpa_to_adapter(vdpa_dev
);
163 static u64
ifcvf_vdpa_get_features(struct vdpa_device
*vdpa_dev
)
165 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
168 features
= ifcvf_get_features(vf
) & IFCVF_SUPPORTED_FEATURES
;
173 static int ifcvf_vdpa_set_features(struct vdpa_device
*vdpa_dev
, u64 features
)
175 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
177 vf
->req_features
= features
;
182 static u8
ifcvf_vdpa_get_status(struct vdpa_device
*vdpa_dev
)
184 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
186 return ifcvf_get_status(vf
);
189 static void ifcvf_vdpa_set_status(struct vdpa_device
*vdpa_dev
, u8 status
)
191 struct ifcvf_adapter
*adapter
;
196 vf
= vdpa_to_vf(vdpa_dev
);
197 adapter
= dev_get_drvdata(vdpa_dev
->dev
.parent
);
198 status_old
= ifcvf_get_status(vf
);
200 if (status_old
== status
)
203 if ((status_old
& VIRTIO_CONFIG_S_DRIVER_OK
) &&
204 !(status
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
205 ifcvf_stop_datapath(adapter
);
206 ifcvf_free_irq(adapter
, IFCVF_MAX_QUEUE_PAIRS
* 2);
210 ifcvf_reset_vring(adapter
);
214 if ((status
& VIRTIO_CONFIG_S_DRIVER_OK
) &&
215 !(status_old
& VIRTIO_CONFIG_S_DRIVER_OK
)) {
216 ret
= ifcvf_request_irq(adapter
);
218 status
= ifcvf_get_status(vf
);
219 status
|= VIRTIO_CONFIG_S_FAILED
;
220 ifcvf_set_status(vf
, status
);
224 if (ifcvf_start_datapath(adapter
) < 0)
225 IFCVF_ERR(adapter
->pdev
,
226 "Failed to set ifcvf vdpa status %u\n",
230 ifcvf_set_status(vf
, status
);
233 static u16
ifcvf_vdpa_get_vq_num_max(struct vdpa_device
*vdpa_dev
)
235 return IFCVF_QUEUE_MAX
;
238 static u64
ifcvf_vdpa_get_vq_state(struct vdpa_device
*vdpa_dev
, u16 qid
)
240 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
242 return ifcvf_get_vq_state(vf
, qid
);
245 static int ifcvf_vdpa_set_vq_state(struct vdpa_device
*vdpa_dev
, u16 qid
,
248 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
250 return ifcvf_set_vq_state(vf
, qid
, num
);
253 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device
*vdpa_dev
, u16 qid
,
254 struct vdpa_callback
*cb
)
256 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
258 vf
->vring
[qid
].cb
= *cb
;
261 static void ifcvf_vdpa_set_vq_ready(struct vdpa_device
*vdpa_dev
,
264 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
266 vf
->vring
[qid
].ready
= ready
;
269 static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device
*vdpa_dev
, u16 qid
)
271 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
273 return vf
->vring
[qid
].ready
;
276 static void ifcvf_vdpa_set_vq_num(struct vdpa_device
*vdpa_dev
, u16 qid
,
279 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
281 vf
->vring
[qid
].size
= num
;
284 static int ifcvf_vdpa_set_vq_address(struct vdpa_device
*vdpa_dev
, u16 qid
,
285 u64 desc_area
, u64 driver_area
,
288 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
290 vf
->vring
[qid
].desc
= desc_area
;
291 vf
->vring
[qid
].avail
= driver_area
;
292 vf
->vring
[qid
].used
= device_area
;
297 static void ifcvf_vdpa_kick_vq(struct vdpa_device
*vdpa_dev
, u16 qid
)
299 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
301 ifcvf_notify_queue(vf
, qid
);
304 static u32
ifcvf_vdpa_get_generation(struct vdpa_device
*vdpa_dev
)
306 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
308 return ioread8(&vf
->common_cfg
->config_generation
);
311 static u32
ifcvf_vdpa_get_device_id(struct vdpa_device
*vdpa_dev
)
313 return VIRTIO_ID_NET
;
316 static u32
ifcvf_vdpa_get_vendor_id(struct vdpa_device
*vdpa_dev
)
318 return IFCVF_SUBSYS_VENDOR_ID
;
321 static u32
ifcvf_vdpa_get_vq_align(struct vdpa_device
*vdpa_dev
)
323 return IFCVF_QUEUE_ALIGNMENT
;
326 static void ifcvf_vdpa_get_config(struct vdpa_device
*vdpa_dev
,
328 void *buf
, unsigned int len
)
330 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
332 WARN_ON(offset
+ len
> sizeof(struct virtio_net_config
));
333 ifcvf_read_net_config(vf
, offset
, buf
, len
);
336 static void ifcvf_vdpa_set_config(struct vdpa_device
*vdpa_dev
,
337 unsigned int offset
, const void *buf
,
340 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
342 WARN_ON(offset
+ len
> sizeof(struct virtio_net_config
));
343 ifcvf_write_net_config(vf
, offset
, buf
, len
);
346 static void ifcvf_vdpa_set_config_cb(struct vdpa_device
*vdpa_dev
,
347 struct vdpa_callback
*cb
)
349 struct ifcvf_hw
*vf
= vdpa_to_vf(vdpa_dev
);
351 vf
->config_cb
.callback
= cb
->callback
;
352 vf
->config_cb
.private = cb
->private;
356 * IFCVF currently does't have on-chip IOMMU, so not
357 * implemented set_map()/dma_map()/dma_unmap()
359 static const struct vdpa_config_ops ifc_vdpa_ops
= {
360 .get_features
= ifcvf_vdpa_get_features
,
361 .set_features
= ifcvf_vdpa_set_features
,
362 .get_status
= ifcvf_vdpa_get_status
,
363 .set_status
= ifcvf_vdpa_set_status
,
364 .get_vq_num_max
= ifcvf_vdpa_get_vq_num_max
,
365 .get_vq_state
= ifcvf_vdpa_get_vq_state
,
366 .set_vq_state
= ifcvf_vdpa_set_vq_state
,
367 .set_vq_cb
= ifcvf_vdpa_set_vq_cb
,
368 .set_vq_ready
= ifcvf_vdpa_set_vq_ready
,
369 .get_vq_ready
= ifcvf_vdpa_get_vq_ready
,
370 .set_vq_num
= ifcvf_vdpa_set_vq_num
,
371 .set_vq_address
= ifcvf_vdpa_set_vq_address
,
372 .kick_vq
= ifcvf_vdpa_kick_vq
,
373 .get_generation
= ifcvf_vdpa_get_generation
,
374 .get_device_id
= ifcvf_vdpa_get_device_id
,
375 .get_vendor_id
= ifcvf_vdpa_get_vendor_id
,
376 .get_vq_align
= ifcvf_vdpa_get_vq_align
,
377 .get_config
= ifcvf_vdpa_get_config
,
378 .set_config
= ifcvf_vdpa_set_config
,
379 .set_config_cb
= ifcvf_vdpa_set_config_cb
,
382 static int ifcvf_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
384 struct device
*dev
= &pdev
->dev
;
385 struct ifcvf_adapter
*adapter
;
389 ret
= pcim_enable_device(pdev
);
391 IFCVF_ERR(pdev
, "Failed to enable device\n");
395 ret
= pcim_iomap_regions(pdev
, BIT(0) | BIT(2) | BIT(4),
398 IFCVF_ERR(pdev
, "Failed to request MMIO region\n");
402 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
404 IFCVF_ERR(pdev
, "No usable DMA confiugration\n");
408 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
411 "No usable coherent DMA confiugration\n");
415 ret
= devm_add_action_or_reset(dev
, ifcvf_free_irq_vectors
, pdev
);
418 "Failed for adding devres for freeing irq vectors\n");
422 adapter
= vdpa_alloc_device(struct ifcvf_adapter
, vdpa
,
424 if (adapter
== NULL
) {
425 IFCVF_ERR(pdev
, "Failed to allocate vDPA structure");
429 pci_set_master(pdev
);
430 pci_set_drvdata(pdev
, adapter
);
433 vf
->base
= pcim_iomap_table(pdev
);
435 adapter
->pdev
= pdev
;
436 adapter
->vdpa
.dma_dev
= &pdev
->dev
;
438 ret
= ifcvf_init_hw(vf
, pdev
);
440 IFCVF_ERR(pdev
, "Failed to init IFCVF hw\n");
444 ret
= vdpa_register_device(&adapter
->vdpa
);
446 IFCVF_ERR(pdev
, "Failed to register ifcvf to vdpa bus");
453 put_device(&adapter
->vdpa
.dev
);
457 static void ifcvf_remove(struct pci_dev
*pdev
)
459 struct ifcvf_adapter
*adapter
= pci_get_drvdata(pdev
);
461 vdpa_unregister_device(&adapter
->vdpa
);
464 static struct pci_device_id ifcvf_pci_ids
[] = {
465 { PCI_DEVICE_SUB(IFCVF_VENDOR_ID
,
467 IFCVF_SUBSYS_VENDOR_ID
,
468 IFCVF_SUBSYS_DEVICE_ID
) },
471 MODULE_DEVICE_TABLE(pci
, ifcvf_pci_ids
);
473 static struct pci_driver ifcvf_driver
= {
474 .name
= IFCVF_DRIVER_NAME
,
475 .id_table
= ifcvf_pci_ids
,
476 .probe
= ifcvf_probe
,
477 .remove
= ifcvf_remove
,
480 module_pci_driver(ifcvf_driver
);
482 MODULE_LICENSE("GPL v2");
483 MODULE_VERSION(VERSION_STRING
);