// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */
11 #include "ifcvf_base.h"
13 static inline u8
ifc_ioread8(u8 __iomem
*addr
)
17 static inline u16
ifc_ioread16 (__le16 __iomem
*addr
)
19 return ioread16(addr
);
22 static inline u32
ifc_ioread32(__le32 __iomem
*addr
)
24 return ioread32(addr
);
27 static inline void ifc_iowrite8(u8 value
, u8 __iomem
*addr
)
29 iowrite8(value
, addr
);
32 static inline void ifc_iowrite16(u16 value
, __le16 __iomem
*addr
)
34 iowrite16(value
, addr
);
37 static inline void ifc_iowrite32(u32 value
, __le32 __iomem
*addr
)
39 iowrite32(value
, addr
);
42 static void ifc_iowrite64_twopart(u64 val
,
43 __le32 __iomem
*lo
, __le32 __iomem
*hi
)
45 ifc_iowrite32((u32
)val
, lo
);
46 ifc_iowrite32(val
>> 32, hi
);
49 struct ifcvf_adapter
*vf_to_adapter(struct ifcvf_hw
*hw
)
51 return container_of(hw
, struct ifcvf_adapter
, vf
);
54 static void __iomem
*get_cap_addr(struct ifcvf_hw
*hw
,
55 struct virtio_pci_cap
*cap
)
57 struct ifcvf_adapter
*ifcvf
;
62 length
= le32_to_cpu(cap
->length
);
63 offset
= le32_to_cpu(cap
->offset
);
66 ifcvf
= vf_to_adapter(hw
);
69 if (bar
>= IFCVF_PCI_MAX_RESOURCE
) {
71 "Invalid bar number %u to get capabilities\n", bar
);
75 if (offset
+ length
> pci_resource_len(pdev
, bar
)) {
77 "offset(%u) + len(%u) overflows bar%u's capability\n",
82 return hw
->base
[bar
] + offset
;
85 static int ifcvf_read_config_range(struct pci_dev
*dev
,
86 uint32_t *val
, int size
, int where
)
90 for (i
= 0; i
< size
; i
+= 4) {
91 ret
= pci_read_config_dword(dev
, where
+ i
, val
+ i
/ 4);
99 int ifcvf_init_hw(struct ifcvf_hw
*hw
, struct pci_dev
*pdev
)
101 struct virtio_pci_cap cap
;
107 ret
= pci_read_config_byte(pdev
, PCI_CAPABILITY_LIST
, &pos
);
109 IFCVF_ERR(pdev
, "Failed to read PCI capability list\n");
114 ret
= ifcvf_read_config_range(pdev
, (u32
*)&cap
,
118 "Failed to get PCI capability at %x\n", pos
);
122 if (cap
.cap_vndr
!= PCI_CAP_ID_VNDR
)
125 switch (cap
.cfg_type
) {
126 case VIRTIO_PCI_CAP_COMMON_CFG
:
127 hw
->common_cfg
= get_cap_addr(hw
, &cap
);
128 IFCVF_DBG(pdev
, "hw->common_cfg = %p\n",
131 case VIRTIO_PCI_CAP_NOTIFY_CFG
:
132 pci_read_config_dword(pdev
, pos
+ sizeof(cap
),
133 &hw
->notify_off_multiplier
);
134 hw
->notify_bar
= cap
.bar
;
135 hw
->notify_base
= get_cap_addr(hw
, &cap
);
136 IFCVF_DBG(pdev
, "hw->notify_base = %p\n",
139 case VIRTIO_PCI_CAP_ISR_CFG
:
140 hw
->isr
= get_cap_addr(hw
, &cap
);
141 IFCVF_DBG(pdev
, "hw->isr = %p\n", hw
->isr
);
143 case VIRTIO_PCI_CAP_DEVICE_CFG
:
144 hw
->net_cfg
= get_cap_addr(hw
, &cap
);
145 IFCVF_DBG(pdev
, "hw->net_cfg = %p\n", hw
->net_cfg
);
153 if (hw
->common_cfg
== NULL
|| hw
->notify_base
== NULL
||
154 hw
->isr
== NULL
|| hw
->net_cfg
== NULL
) {
155 IFCVF_ERR(pdev
, "Incomplete PCI capabilities\n");
159 for (i
= 0; i
< IFCVF_MAX_QUEUE_PAIRS
* 2; i
++) {
160 ifc_iowrite16(i
, &hw
->common_cfg
->queue_select
);
161 notify_off
= ifc_ioread16(&hw
->common_cfg
->queue_notify_off
);
162 hw
->vring
[i
].notify_addr
= hw
->notify_base
+
163 notify_off
* hw
->notify_off_multiplier
;
166 hw
->lm_cfg
= hw
->base
[IFCVF_LM_BAR
];
169 "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
170 hw
->common_cfg
, hw
->notify_base
, hw
->isr
,
171 hw
->net_cfg
, hw
->notify_off_multiplier
);
176 u8
ifcvf_get_status(struct ifcvf_hw
*hw
)
178 return ifc_ioread8(&hw
->common_cfg
->device_status
);
181 void ifcvf_set_status(struct ifcvf_hw
*hw
, u8 status
)
183 ifc_iowrite8(status
, &hw
->common_cfg
->device_status
);
186 void ifcvf_reset(struct ifcvf_hw
*hw
)
188 hw
->config_cb
.callback
= NULL
;
189 hw
->config_cb
.private = NULL
;
191 ifcvf_set_status(hw
, 0);
192 /* flush set_status, make sure VF is stopped, reset */
193 ifcvf_get_status(hw
);
196 static void ifcvf_add_status(struct ifcvf_hw
*hw
, u8 status
)
199 status
|= ifcvf_get_status(hw
);
201 ifcvf_set_status(hw
, status
);
202 ifcvf_get_status(hw
);
205 u64
ifcvf_get_features(struct ifcvf_hw
*hw
)
207 struct virtio_pci_common_cfg __iomem
*cfg
= hw
->common_cfg
;
208 u32 features_lo
, features_hi
;
210 ifc_iowrite32(0, &cfg
->device_feature_select
);
211 features_lo
= ifc_ioread32(&cfg
->device_feature
);
213 ifc_iowrite32(1, &cfg
->device_feature_select
);
214 features_hi
= ifc_ioread32(&cfg
->device_feature
);
216 return ((u64
)features_hi
<< 32) | features_lo
;
219 void ifcvf_read_net_config(struct ifcvf_hw
*hw
, u64 offset
,
220 void *dst
, int length
)
222 u8 old_gen
, new_gen
, *p
;
225 WARN_ON(offset
+ length
> sizeof(struct virtio_net_config
));
227 old_gen
= ifc_ioread8(&hw
->common_cfg
->config_generation
);
229 for (i
= 0; i
< length
; i
++)
230 *p
++ = ifc_ioread8(hw
->net_cfg
+ offset
+ i
);
232 new_gen
= ifc_ioread8(&hw
->common_cfg
->config_generation
);
233 } while (old_gen
!= new_gen
);
236 void ifcvf_write_net_config(struct ifcvf_hw
*hw
, u64 offset
,
237 const void *src
, int length
)
243 WARN_ON(offset
+ length
> sizeof(struct virtio_net_config
));
244 for (i
= 0; i
< length
; i
++)
245 ifc_iowrite8(*p
++, hw
->net_cfg
+ offset
+ i
);
248 static void ifcvf_set_features(struct ifcvf_hw
*hw
, u64 features
)
250 struct virtio_pci_common_cfg __iomem
*cfg
= hw
->common_cfg
;
252 ifc_iowrite32(0, &cfg
->guest_feature_select
);
253 ifc_iowrite32((u32
)features
, &cfg
->guest_feature
);
255 ifc_iowrite32(1, &cfg
->guest_feature_select
);
256 ifc_iowrite32(features
>> 32, &cfg
->guest_feature
);
259 static int ifcvf_config_features(struct ifcvf_hw
*hw
)
261 struct ifcvf_adapter
*ifcvf
;
263 ifcvf
= vf_to_adapter(hw
);
264 ifcvf_set_features(hw
, hw
->req_features
);
265 ifcvf_add_status(hw
, VIRTIO_CONFIG_S_FEATURES_OK
);
267 if (!(ifcvf_get_status(hw
) & VIRTIO_CONFIG_S_FEATURES_OK
)) {
268 IFCVF_ERR(ifcvf
->pdev
, "Failed to set FEATURES_OK status\n");
275 u16
ifcvf_get_vq_state(struct ifcvf_hw
*hw
, u16 qid
)
277 struct ifcvf_lm_cfg __iomem
*ifcvf_lm
;
278 void __iomem
*avail_idx_addr
;
282 ifcvf_lm
= (struct ifcvf_lm_cfg __iomem
*)hw
->lm_cfg
;
283 q_pair_id
= qid
/ (IFCVF_MAX_QUEUE_PAIRS
* 2);
284 avail_idx_addr
= &ifcvf_lm
->vring_lm_cfg
[q_pair_id
].idx_addr
[qid
% 2];
285 last_avail_idx
= ifc_ioread16(avail_idx_addr
);
287 return last_avail_idx
;
290 int ifcvf_set_vq_state(struct ifcvf_hw
*hw
, u16 qid
, u16 num
)
292 struct ifcvf_lm_cfg __iomem
*ifcvf_lm
;
293 void __iomem
*avail_idx_addr
;
296 ifcvf_lm
= (struct ifcvf_lm_cfg __iomem
*)hw
->lm_cfg
;
297 q_pair_id
= qid
/ (IFCVF_MAX_QUEUE_PAIRS
* 2);
298 avail_idx_addr
= &ifcvf_lm
->vring_lm_cfg
[q_pair_id
].idx_addr
[qid
% 2];
299 hw
->vring
[qid
].last_avail_idx
= num
;
300 ifc_iowrite16(num
, avail_idx_addr
);
305 static int ifcvf_hw_enable(struct ifcvf_hw
*hw
)
307 struct virtio_pci_common_cfg __iomem
*cfg
;
308 struct ifcvf_adapter
*ifcvf
;
311 ifcvf
= vf_to_adapter(hw
);
312 cfg
= hw
->common_cfg
;
313 ifc_iowrite16(IFCVF_MSI_CONFIG_OFF
, &cfg
->msix_config
);
315 if (ifc_ioread16(&cfg
->msix_config
) == VIRTIO_MSI_NO_VECTOR
) {
316 IFCVF_ERR(ifcvf
->pdev
, "No msix vector for device config\n");
320 for (i
= 0; i
< hw
->nr_vring
; i
++) {
321 if (!hw
->vring
[i
].ready
)
324 ifc_iowrite16(i
, &cfg
->queue_select
);
325 ifc_iowrite64_twopart(hw
->vring
[i
].desc
, &cfg
->queue_desc_lo
,
326 &cfg
->queue_desc_hi
);
327 ifc_iowrite64_twopart(hw
->vring
[i
].avail
, &cfg
->queue_avail_lo
,
328 &cfg
->queue_avail_hi
);
329 ifc_iowrite64_twopart(hw
->vring
[i
].used
, &cfg
->queue_used_lo
,
330 &cfg
->queue_used_hi
);
331 ifc_iowrite16(hw
->vring
[i
].size
, &cfg
->queue_size
);
332 ifc_iowrite16(i
+ IFCVF_MSI_QUEUE_OFF
, &cfg
->queue_msix_vector
);
334 if (ifc_ioread16(&cfg
->queue_msix_vector
) ==
335 VIRTIO_MSI_NO_VECTOR
) {
336 IFCVF_ERR(ifcvf
->pdev
,
337 "No msix vector for queue %u\n", i
);
341 ifcvf_set_vq_state(hw
, i
, hw
->vring
[i
].last_avail_idx
);
342 ifc_iowrite16(1, &cfg
->queue_enable
);
348 static void ifcvf_hw_disable(struct ifcvf_hw
*hw
)
350 struct virtio_pci_common_cfg __iomem
*cfg
;
353 cfg
= hw
->common_cfg
;
354 ifc_iowrite16(VIRTIO_MSI_NO_VECTOR
, &cfg
->msix_config
);
356 for (i
= 0; i
< hw
->nr_vring
; i
++) {
357 ifc_iowrite16(i
, &cfg
->queue_select
);
358 ifc_iowrite16(VIRTIO_MSI_NO_VECTOR
, &cfg
->queue_msix_vector
);
361 ifc_ioread16(&cfg
->queue_msix_vector
);
364 int ifcvf_start_hw(struct ifcvf_hw
*hw
)
367 ifcvf_add_status(hw
, VIRTIO_CONFIG_S_ACKNOWLEDGE
);
368 ifcvf_add_status(hw
, VIRTIO_CONFIG_S_DRIVER
);
370 if (ifcvf_config_features(hw
) < 0)
373 if (ifcvf_hw_enable(hw
) < 0)
376 ifcvf_add_status(hw
, VIRTIO_CONFIG_S_DRIVER_OK
);
/* Quiesce the device: drop MSI-X vectors, then reset it. */
void ifcvf_stop_hw(struct ifcvf_hw *hw)
{
	ifcvf_hw_disable(hw);
	ifcvf_reset(hw);
}
387 void ifcvf_notify_queue(struct ifcvf_hw
*hw
, u16 qid
)
389 ifc_iowrite16(qid
, hw
->vring
[qid
].notify_addr
);