// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include "ifcvf_base.h"
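/*
 * Select virtqueue @qid and program its MSI-X vector, then read the
 * vector back so the caller can check whether the device accepted the
 * mapping (per the virtio spec, a failed mapping reads back as
 * VIRTIO_MSI_NO_VECTOR).
 */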
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}
u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(vector, &cfg->msix_config);

	return vp_ioread16(&cfg->msix_config);
}
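/*
 * Translate a virtio PCI capability into the corresponding ioremapped
 * address inside one of the device BARs, rejecting capabilities whose
 * BAR number is out of range or whose range spills past the BAR.
 */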
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(hw->pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(hw->pdev, bar)) {
		IFCVF_DBG(hw->pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}
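/*
 * Read @size bytes of PCI config space starting at @where into @val,
 * one dword at a time; @size is expected to be a multiple of 4.
 */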
static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}
u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
	u16 queue_size;

	if (qid >= hw->nr_vring)
		return 0;

	vp_iowrite16(qid, &hw->common_cfg->queue_select);
	queue_size = vp_ioread16(&hw->common_cfg->queue_size);

	return queue_size;
}
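/*
 * Return the largest queue size reported across all virtqueues;
 * queues reporting a size of 0 are skipped as unavailable.
 */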
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
	u16 queue_size, max_size, qid;

	max_size = ifcvf_get_vq_size(hw, 0);
	for (qid = 1; qid < hw->nr_vring; qid++) {
		queue_size = ifcvf_get_vq_size(hw, qid);
		/* 0 means the queue is unavailable */
		if (!queue_size)
			continue;

		max_size = max(queue_size, max_size);
	}

	return max_size;
}
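/*
 * Walk the PCI capability list, map the virtio common/notify/ISR/device
 * config capabilities and cache each virtqueue's notify address.
 * Returns 0 on success or a negative errno on failure.
 */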
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}
	hw->pdev = pdev;

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					     le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			hw->cap_dev_config_size = le32_to_cpu(cap.length);
			IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->dev_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}

	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
	hw->vring = kzalloc(sizeof(struct vring_info) * hw->nr_vring, GFP_KERNEL);
	if (!hw->vring)
		return -ENOMEM;

	for (i = 0; i < hw->nr_vring; i++) {
		vp_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].irq = -EINVAL;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->dev_cfg, hw->notify_off_multiplier);

	hw->vqs_reused_irq = -EINVAL;
	hw->config_irq = -EINVAL;

	return 0;
}
u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return vp_ioread8(&hw->common_cfg->device_status);
}
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	vp_iowrite8(status, &hw->common_cfg->device_status);
}
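/*
 * Reset the device by writing 0 to device_status, then poll until the
 * device reports the reset has completed (status reads back as 0).
 */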
void ifcvf_reset(struct ifcvf_hw *hw)
{
	ifcvf_set_status(hw, 0);
	while (ifcvf_get_status(hw))
		msleep(1);
}
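/*
 * The 64-bit device feature word is exposed as two 32-bit banks: write
 * the bank index to device_feature_select, then read device_feature.
 */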
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}
/* return provisioned vDPA dev features */
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
{
	return hw->dev_features;
}
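/*
 * Read back the feature bits the driver has acked, again as two 32-bit
 * banks selected through guest_feature_select.
 */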
u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features_lo = vp_ioread32(&cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	features_hi = vp_ioread32(&cfg->guest_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}
int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}
u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	u32 net_config_size = sizeof(struct virtio_net_config);
	u32 blk_config_size = sizeof(struct virtio_blk_config);
	u32 cap_size = hw->cap_dev_config_size;
	u32 config_size;

	/* If the onboard device config space is larger than
	 * struct virtio_net/blk_config, only the size defined by the
	 * spec is returned. This is very unlikely; purely defensive.
	 */
	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = min(cap_size, net_config_size);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = min(cap_size, blk_config_size);
		break;
	default:
		config_size = 0;
		IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}
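/*
 * Copy @length bytes of device config space into @dst. The copy is
 * retried whenever config_generation changes mid-read, so the caller
 * always sees a consistent snapshot.
 */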
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);
	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}
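/*
 * Illustrative only: a vDPA get_config callback in the adapter layer
 * would typically bounds-check against ifcvf_get_config_size() and then
 * forward to the helper above, e.g.
 *
 *	if (offset + len <= ifcvf_get_config_size(hw))
 *		ifcvf_read_dev_config(hw, offset, buf, len);
 *
 * (sketch only; the surrounding callback and the offset/buf/len names
 * are hypothetical and not part of this file).
 */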
void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > hw->config_size);
	for (i = 0; i < length; i++)
		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}
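/*
 * Ack the negotiated feature bits: write each 32-bit half of @features
 * to guest_feature after selecting the corresponding bank.
 */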
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
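/*
 * The last_avail_idx of each virtqueue is kept in the LM (live
 * migration) BAR region; each queue has its own slot there, accessed
 * at a per-queue offset from vq_state_region.
 */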
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
	u16 last_avail_idx;

	last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);

	return last_avail_idx;
}
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;

	vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);

	return 0;
}
void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(num, &cfg->queue_size);
}
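/*
 * Program the descriptor, driver (avail) and device (used) ring
 * addresses of virtqueue @qid; each 64-bit address is written as two
 * 32-bit halves.
 */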
int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
			 u64 driver_area, u64 device_area)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);

	return 0;
}
bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u16 queue_enable;

	vp_iowrite16(qid, &cfg->queue_select);
	queue_enable = vp_ioread16(&cfg->queue_enable);

	return (bool)queue_enable;
}
void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(ready, &cfg->queue_enable);
}
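/*
 * Drop the per-virtqueue callbacks and unmap each queue from its MSI-X
 * vector (VIRTIO_MSI_NO_VECTOR).
 */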
static void ifcvf_reset_vring(struct ifcvf_hw *hw)
{
	u16 qid;

	for (qid = 0; qid < hw->nr_vring; qid++) {
		hw->vring[qid].cb.callback = NULL;
		hw->vring[qid].cb.private = NULL;
		ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
	}
}
static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;
	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
}
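/*
 * Wait for any in-flight interrupt handler on every allocated MSI-X
 * vector to finish before the callbacks are torn down.
 */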
static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
{
	u32 nvectors = hw->num_msix_vectors;
	struct pci_dev *pdev = hw->pdev;
	int i, irq;

	for (i = 0; i < nvectors; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq >= 0)
			synchronize_irq(irq);
	}
}
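/*
 * Quiesce the device from the driver's point of view: wait for pending
 * interrupts, then clear the virtqueue and config-change handlers.
 */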
void ifcvf_stop(struct ifcvf_hw *hw)
{
	ifcvf_synchronize_irq(hw);
	ifcvf_reset_vring(hw);
	ifcvf_reset_config_handler(hw);
}
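/*
 * Kick virtqueue @qid by writing its index to the queue's notify
 * address computed in ifcvf_init_hw().
 */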
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	vp_iowrite16(qid, hw->vring[qid].notify_addr);
}