// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define VERSION_STRING		"0.1"
#define DRIVER_AUTHOR		"Intel Corporation"
#define IFCVF_DRIVER_NAME	"ifcvf"
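
/*
 * MSI-X interrupt handlers. Each one simply forwards the event to the
 * callback that the vDPA bus driver registered: set_config_cb() for
 * config-change interrupts, set_vq_cb() for per-virtqueue interrupts.
 */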
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
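
/*
 * Free the per-virtqueue IRQs in [0, queues) plus the config-change IRQ,
 * then release the underlying MSI-X vectors. vring[i].irq is reset to
 * -EINVAL to mark the line as unused.
 */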
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < queues; i++) {
		devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
		vf->vring[i].irq = -EINVAL;
	}

	devm_free_irq(&pdev->dev, vf->config_irq, vf);
	ifcvf_free_irq_vectors(pdev);
}
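
/*
 * MSI-X vector layout: vector 0 carries config-change interrupts and
 * virtqueue i uses vector i + IFCVF_MSI_QUEUE_OFF. On failure the IRQs
 * requested so far are freed before returning.
 */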
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int vector, i, ret, irq;

	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
		 pci_name(pdev));
	vector = 0;
	vf->config_irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		return ret;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
			 pci_name(pdev), i);
		vector = i + IFCVF_MSI_QUEUE_OFF;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev,
				  "Failed to request irq for vq %d\n", i);
			ifcvf_free_irq(adapter, i);

			return ret;
		}

		vf->vring[i].irq = irq;
	}

	return 0;
}
static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}
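
/*
 * Clear all software vring state (addresses, indices, ready flags and
 * callbacks) and reset the device, returning it to a clean state for
 * the next DRIVER_OK cycle.
 */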
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}
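
/*
 * container_of()-based helpers from the embedded vdpa_device back to
 * the adapter and its VF hardware state.
 */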
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	u64 features;

	features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;

	return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->req_features = features;

	return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}
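
/*
 * Status writes drive the datapath life cycle: setting DRIVER_OK
 * requests IRQs and starts the datapath, clearing DRIVER_OK stops the
 * datapath and frees the IRQs, and writing 0 resets the vrings.
 */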
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = dev_get_drvdata(vdpa_dev->dev.parent);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
	}

	if (status == 0) {
		ifcvf_reset_vring(adapter);
		goto out;
	}

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

out:
	ifcvf_set_status(vf, status);
}
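
/*
 * The vq ops below mostly cache state in vf->vring[]; the cached values
 * are committed to the hardware when the datapath is (re)started.
 */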
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->avail_index = ifcvf_get_vq_state(vf, qid);

	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}
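
/*
 * In split-virtqueue terms, desc_area above is the descriptor table,
 * driver_area the avail ring and device_area the used ring; kick_vq
 * below rings the hardware doorbell for the queue.
 */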
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return IFCVF_SUBSYS_VENDOR_ID;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].irq;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features	= ifcvf_vdpa_get_features,
	.set_features	= ifcvf_vdpa_set_features,
	.get_status	= ifcvf_vdpa_get_status,
	.set_status	= ifcvf_vdpa_set_status,
	.get_vq_num_max	= ifcvf_vdpa_get_vq_num_max,
	.get_vq_state	= ifcvf_vdpa_get_vq_state,
	.set_vq_state	= ifcvf_vdpa_set_vq_state,
	.set_vq_cb	= ifcvf_vdpa_set_vq_cb,
	.set_vq_ready	= ifcvf_vdpa_set_vq_ready,
	.get_vq_ready	= ifcvf_vdpa_get_vq_ready,
	.set_vq_num	= ifcvf_vdpa_set_vq_num,
	.set_vq_address	= ifcvf_vdpa_set_vq_address,
	.get_vq_irq	= ifcvf_vdpa_get_vq_irq,
	.kick_vq	= ifcvf_vdpa_kick_vq,
	.get_generation	= ifcvf_vdpa_get_generation,
	.get_device_id	= ifcvf_vdpa_get_device_id,
	.get_vendor_id	= ifcvf_vdpa_get_vendor_id,
	.get_vq_align	= ifcvf_vdpa_get_vq_align,
	.get_config	= ifcvf_vdpa_get_config,
	.set_config	= ifcvf_vdpa_set_config,
	.set_config_cb	= ifcvf_vdpa_set_config_cb,
};
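
/*
 * Probe maps BARs 0, 2 and 4 (where the VF exposes its virtio
 * configuration structures), enables 64-bit DMA, allocates the adapter
 * with its embedded vdpa_device and registers it on the vDPA bus. The
 * PCI resources are devres-managed, so error paths only need to drop
 * the vdpa_device reference.
 */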
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops,
				    IFCVF_MAX_QUEUE_PAIRS * 2);
	if (adapter == NULL) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return -ENOMEM;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	vf = &adapter->vf;
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].irq = -EINVAL;

	ret = vdpa_register_device(&adapter->vdpa);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
		goto err;
	}

	return 0;

err:
	put_device(&adapter->vdpa.dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

	vdpa_unregister_device(&adapter->vdpa);
}
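
/*
 * Match on the subsystem IDs as well, so the driver only binds to the
 * IFC VF personality of this vendor/device pair.
 */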
static struct pci_device_id ifcvf_pci_ids[] = {
	{ PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
			 IFCVF_DEVICE_ID,
			 IFCVF_SUBSYS_VENDOR_ID,
			 IFCVF_SUBSYS_DEVICE_ID) },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION(VERSION_STRING);