// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Advanced Micro Devices, Inc */

#include <linux/vdpa.h>
#include <uapi/linux/vdpa.h>
#include <linux/virtio_pci_modern.h>

#include <linux/pds/pds_common.h>
#include <linux/pds/pds_core_if.h>
#include <linux/pds/pds_adminq.h>
#include <linux/pds/pds_auxbus.h>
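
/* Driver-local headers; the names are assumed from the pds_vdpa driver
 * layout (device structures, adminq command wrappers, debugfs helpers).
 */
#include "vdpa_dev.h"
#include "aux_drv.h"
#include "cmds.h"
#include "debugfs.h"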

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev);

static struct pds_vdpa_device *vdpa_to_pdsv(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct pds_vdpa_device, vdpa_dev);
}
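
/* PDS core event notifier: reset and link-change events from the core
 * driver are forwarded to the vdpa user as a config-change callback,
 * registered below through pdsc_register_notify().
 */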

static int pds_vdpa_notify_handler(struct notifier_block *nb,
				   unsigned long ecode,
				   void *data)
{
	struct pds_vdpa_device *pdsv = container_of(nb, struct pds_vdpa_device, nb);
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;

	dev_dbg(dev, "%s: event code %lu\n", __func__, ecode);

	if (ecode == PDS_EVENT_RESET || ecode == PDS_EVENT_LINK_CHANGE) {
		if (pdsv->config_cb.callback)
			pdsv->config_cb.callback(pdsv->config_cb.private);
	}

	return 0;
}

static int pds_vdpa_register_event_handler(struct pds_vdpa_device *pdsv)
{
	struct device *dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	struct notifier_block *nb = &pdsv->nb;
	int err;

	if (!nb->notifier_call) {
		nb->notifier_call = pds_vdpa_notify_handler;
		err = pdsc_register_notify(nb);
		if (err) {
			nb->notifier_call = NULL;
			dev_err(dev, "failed to register pds event handler: %ps\n",
				ERR_PTR(err));
			return err;
		}
		dev_dbg(dev, "pds event handler registered\n");
	}

	return 0;
}

static void pds_vdpa_unregister_event_handler(struct pds_vdpa_device *pdsv)
{
	if (pdsv->nb.notifier_call) {
		pdsc_unregister_notify(&pdsv->nb);
		pdsv->nb.notifier_call = NULL;
	}
}
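
/* The vq address/num/callback setters below only cache values in
 * pdsv->vqs[]; the settings are pushed to the device in a single adminq
 * "init vq" command when the queue is marked ready.
 */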

static int pds_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				   u64 desc_addr, u64 driver_addr, u64 device_addr)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].desc_addr = desc_addr;
	pdsv->vqs[qid].avail_addr = driver_addr;
	pdsv->vqs[qid].used_addr = device_addr;

	return 0;
}

static void pds_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, u32 num)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].q_len = num;
}

static void pds_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	iowrite16(qid, pdsv->vqs[qid].notify);
}

static void pds_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
			       struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->vqs[qid].event_cb = *cb;
}

static irqreturn_t pds_vdpa_isr(int irq, void *data)
{
	struct pds_vdpa_vq_info *vq;

	vq = data;
	if (vq->event_cb.callback)
		vq->event_cb.callback(vq->event_cb.private);

	return IRQ_HANDLED;
}
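
/* Each vq's interrupt is registered with its pds_vdpa_vq_info as the irq
 * cookie (see pds_vdpa_request_irqs()), so the ISR above only needs to
 * bounce the event to that vq's callback.
 */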

static void pds_vdpa_release_irq(struct pds_vdpa_device *pdsv, int qid)
{
	if (pdsv->vqs[qid].irq == VIRTIO_MSI_NO_VECTOR)
		return;

	free_irq(pdsv->vqs[qid].irq, &pdsv->vqs[qid]);
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
}

static void pds_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, u16 qid, bool ready)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u16 invert_idx = 0;
	int err;

	dev_dbg(dev, "%s: qid %d ready %d => %d\n",
		__func__, qid, pdsv->vqs[qid].ready, ready);
	if (ready == pdsv->vqs[qid].ready)
		return;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED))
		invert_idx = PDS_VDPA_PACKED_INVERT_IDX;

	if (ready) {
		/* Pass vq setup info to DSC using adminq to gather up and
		 * send all info at once so FW can do its full set up in
		 * one shot
		 */
		err = pds_vdpa_cmd_init_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "Failed to init vq %d: %pe\n",
				qid, ERR_PTR(err));
			ready = false;
		}
	} else {
		err = pds_vdpa_cmd_reset_vq(pdsv, qid, invert_idx, &pdsv->vqs[qid]);
		if (err)
			dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
	}

	pdsv->vqs[qid].ready = ready;
}

static bool pds_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].ready;
}
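
/* For packed rings the 16-bit vq state is carried with bits 0-14 as the
 * ring index and bit 15 as the wrap counter, so e.g. index 3 with the
 * wrap counter set is encoded as 0x8003 before the inversion described
 * below.
 */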

static int pds_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 const struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Setting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail = state->packed.last_avail_idx |
			(state->packed.last_avail_counter << 15);
		used = state->packed.last_used_idx |
		       (state->packed.last_used_counter << 15);

		/* The avail and used index are stored with the packed wrap
		 * counter bit inverted.  This way, in case set_vq_state is
		 * not called, the initial value can be set to zero prior to
		 * feature negotiation, and it is good for both packed and
		 * split virtqueues.
		 */
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;
	} else {
		avail = state->split.avail_index;
		/* state->split does not provide a used_index:
		 * the vq will be set to "empty" here, and the vq will read
		 * the current used index the next time the vq is kicked.
		 */
		used = avail;
	}

	if (used != avail) {
		dev_dbg(dev, "Setting used equal to avail, for interoperability\n");
		used = avail;
	}

	pdsv->vqs[qid].avail_idx = avail;
	pdsv->vqs[qid].used_idx = used;

	return 0;
}

static int pds_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_vq_state *state)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_auxiliary_dev *padev = pdsv->vdpa_aux->padev;
	struct device *dev = &padev->aux_dev.dev;
	u64 driver_features;
	u16 avail;
	u16 used;

	if (pdsv->vqs[qid].ready) {
		dev_err(dev, "Getting device position is denied while vq is enabled\n");
		return -EINVAL;
	}

	avail = pdsv->vqs[qid].avail_idx;
	used = pdsv->vqs[qid].used_idx;

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	if (driver_features & BIT_ULL(VIRTIO_F_RING_PACKED)) {
		avail ^= PDS_VDPA_PACKED_INVERT_IDX;
		used ^= PDS_VDPA_PACKED_INVERT_IDX;

		state->packed.last_avail_idx = avail & 0x7fff;
		state->packed.last_avail_counter = avail >> 15;
		state->packed.last_used_idx = used & 0x7fff;
		state->packed.last_used_counter = used >> 15;
	} else {
		state->split.avail_index = avail;
		/* state->split does not provide a used_index. */
	}

	return 0;
}

static struct vdpa_notification_area
pds_vdpa_get_vq_notification(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct virtio_pci_modern_device *vd_mdev;
	struct vdpa_notification_area area;

	area.addr = pdsv->vqs[qid].notify_pa;

	vd_mdev = &pdsv->vdpa_aux->vd_mdev;
	if (!vd_mdev->notify_offset_multiplier)
		area.size = PDS_PAGE_SIZE;
	else
		area.size = vd_mdev->notify_offset_multiplier;

	return area;
}

static int pds_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->vqs[qid].irq;
}

static u32 pds_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return PDS_PAGE_SIZE;
}
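
/* Only one vq group and one address space are used (see the "1, 1"
 * arguments to vdpa_alloc_device() in pds_vdpa_dev_add()), so every
 * queue reports group 0.
 */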

static u32 pds_vdpa_get_vq_group(struct vdpa_device *vdpa_dev, u16 idx)
{
	return 0;
}

static u64 pds_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->supported_features;
}

static int pds_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u64 driver_features;
	u64 nego_features;
	u64 hw_features;
	u64 missing;

	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		dev_err(dev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EOPNOTSUPP;
	}

	/* Check for valid feature bits */
	nego_features = features & pdsv->supported_features;
	missing = features & ~nego_features;
	if (missing) {
		dev_err(dev, "Can't support all requested features in %#llx, missing %#llx features\n",
			features, missing);
		return -EOPNOTSUPP;
	}

	driver_features = pds_vdpa_get_driver_features(vdpa_dev);
	pdsv->negotiated_features = nego_features;
	dev_dbg(dev, "%s: %#llx => %#llx\n",
		__func__, driver_features, nego_features);

	/* if we're faking the F_MAC, strip it before writing to device */
	hw_features = le64_to_cpu(pdsv->vdpa_aux->ident.hw_features);
	if (!(hw_features & BIT_ULL(VIRTIO_NET_F_MAC)))
		nego_features &= ~BIT_ULL(VIRTIO_NET_F_MAC);

	if (driver_features == nego_features)
		return 0;

	vp_modern_set_features(&pdsv->vdpa_aux->vd_mdev, nego_features);

	return 0;
}

static u64 pds_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return pdsv->negotiated_features;
}

static void pds_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				   struct vdpa_callback *cb)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	pdsv->config_cb.callback = cb->callback;
	pdsv->config_cb.private = cb->private;
}

static u16 pds_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	/* qemu has assert() that vq_num_max <= VIRTQUEUE_MAX_SIZE (1024) */
	return min_t(u16, 1024, BIT(le16_to_cpu(pdsv->vdpa_aux->ident.max_qlen)));
}

static u32 pds_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}

static u32 pds_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return PCI_VENDOR_ID_PENSANDO;
}

static u8 pds_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);

	return vp_modern_get_status(&pdsv->vdpa_aux->vd_mdev);
}
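
/* One MSI-X vector is used per vq.  The vectors are requested when the
 * driver sets DRIVER_OK and released when DRIVER_OK is cleared; see
 * pds_vdpa_set_status() below.
 */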

static int pds_vdpa_request_irqs(struct pds_vdpa_device *pdsv)
{
	struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
	struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
	struct device *dev = &pdsv->vdpa_dev.dev;
	int max_vq, nintrs, qid, err;

	max_vq = vdpa_aux->vdpa_mdev.max_supported_vqs;

	nintrs = pci_alloc_irq_vectors(pdev, max_vq, max_vq, PCI_IRQ_MSIX);
	if (nintrs < 0) {
		dev_err(dev, "Couldn't get %d msix vectors: %pe\n",
			max_vq, ERR_PTR(nintrs));
		return nintrs;
	}

	for (qid = 0; qid < pdsv->num_vqs; ++qid) {
		int irq = pci_irq_vector(pdev, qid);

		snprintf(pdsv->vqs[qid].irq_name, sizeof(pdsv->vqs[qid].irq_name),
			 "vdpa-%s-%d", dev_name(dev), qid);

		err = request_irq(irq, pds_vdpa_isr, 0,
				  pdsv->vqs[qid].irq_name,
				  &pdsv->vqs[qid]);
		if (err) {
			dev_err(dev, "%s: no irq for qid %d: %pe\n",
				__func__, qid, ERR_PTR(err));
			goto err_release;
		}

		pdsv->vqs[qid].irq = irq;
	}

	vdpa_aux->nintrs = nintrs;

	return 0;

err_release:
	while (qid--)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;

	return err;
}

void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pci_dev *pdev;
	int qid;

	if (!pdsv)
		return;

	pdev = pdsv->vdpa_aux->padev->vf_pdev;
	vdpa_aux = pdsv->vdpa_aux;

	if (!vdpa_aux->nintrs)
		return;

	for (qid = 0; qid < pdsv->num_vqs; qid++)
		pds_vdpa_release_irq(pdsv, qid);

	pci_free_irq_vectors(pdev);

	vdpa_aux->nintrs = 0;
}

static void pds_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev = &pdsv->vdpa_dev.dev;
	u8 old_status;
	int i;

	old_status = pds_vdpa_get_status(vdpa_dev);
	dev_dbg(dev, "%s: old %#x new %#x\n", __func__, old_status, status);

	if (status & ~old_status & VIRTIO_CONFIG_S_DRIVER_OK) {
		if (pds_vdpa_request_irqs(pdsv))
			status = old_status | VIRTIO_CONFIG_S_FAILED;
	}

	pds_vdpa_cmd_set_status(pdsv, status);

	if (status == 0) {
		struct vdpa_callback null_cb = { };

		pds_vdpa_set_config_cb(vdpa_dev, &null_cb);
		pds_vdpa_cmd_reset(pdsv);

		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].avail_idx = 0;
			pdsv->vqs[i].used_idx = 0;
		}

		pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);
	}

	if (status & ~old_status & VIRTIO_CONFIG_S_FEATURES_OK) {
		for (i = 0; i < pdsv->num_vqs; i++) {
			pdsv->vqs[i].notify =
				vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
							i, &pdsv->vqs[i].notify_pa);
		}
	}

	if (old_status & ~status & VIRTIO_CONFIG_S_DRIVER_OK)
		pds_vdpa_release_irqs(pdsv);
}

static void pds_vdpa_init_vqs_entry(struct pds_vdpa_device *pdsv, int qid,
				    void __iomem *notify)
{
	memset(&pdsv->vqs[qid], 0, sizeof(pdsv->vqs[0]));
	pdsv->vqs[qid].qid = qid;
	pdsv->vqs[qid].pdsv = pdsv;
	pdsv->vqs[qid].ready = false;
	pdsv->vqs[qid].irq = VIRTIO_MSI_NO_VECTOR;
	pdsv->vqs[qid].notify = notify;
}

static int pds_vdpa_reset(struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct device *dev;
	int err = 0;
	u8 status;
	int i;

	dev = &pdsv->vdpa_aux->padev->aux_dev.dev;
	status = pds_vdpa_get_status(vdpa_dev);

	if (status == 0)
		return 0;

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vqs */
		for (i = 0; i < pdsv->num_vqs && !err; i++) {
			err = pds_vdpa_cmd_reset_vq(pdsv, i, 0, &pdsv->vqs[i]);
			if (err)
				dev_err(dev, "%s: reset_vq failed qid %d: %pe\n",
					__func__, i, ERR_PTR(err));
		}
	}

	pds_vdpa_set_status(vdpa_dev, 0);

	if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
		/* Reset the vq info */
		for (i = 0; i < pdsv->num_vqs && !err; i++)
			pds_vdpa_init_vqs_entry(pdsv, i, pdsv->vqs[i].notify);
	}

	return 0;
}

static size_t pds_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	return sizeof(struct virtio_net_config);
}
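
/* Config space accessors: the virtio_net_config lives in the device BAR
 * mapped through the modern virtio-pci helpers, so reads and writes go
 * through memcpy_fromio()/memcpy_toio() on vd_mdev.device.
 */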

static void pds_vdpa_get_config(struct vdpa_device *vdpa_dev,
				unsigned int offset,
				void *buf, unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad read, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_fromio(buf, device + offset, len);
}

static void pds_vdpa_set_config(struct vdpa_device *vdpa_dev,
				unsigned int offset, const void *buf,
				unsigned int len)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	void __iomem *device;

	if (offset + len > sizeof(struct virtio_net_config)) {
		WARN(true, "%s: bad write, offset %d len %d\n", __func__, offset, len);
		return;
	}

	device = pdsv->vdpa_aux->vd_mdev.device;
	memcpy_toio(device + offset, buf, len);
}

static const struct vdpa_config_ops pds_vdpa_ops = {
	.set_vq_address		= pds_vdpa_set_vq_address,
	.set_vq_num		= pds_vdpa_set_vq_num,
	.kick_vq		= pds_vdpa_kick_vq,
	.set_vq_cb		= pds_vdpa_set_vq_cb,
	.set_vq_ready		= pds_vdpa_set_vq_ready,
	.get_vq_ready		= pds_vdpa_get_vq_ready,
	.set_vq_state		= pds_vdpa_set_vq_state,
	.get_vq_state		= pds_vdpa_get_vq_state,
	.get_vq_notification	= pds_vdpa_get_vq_notification,
	.get_vq_irq		= pds_vdpa_get_vq_irq,
	.get_vq_align		= pds_vdpa_get_vq_align,
	.get_vq_group		= pds_vdpa_get_vq_group,

	.get_device_features	= pds_vdpa_get_device_features,
	.set_driver_features	= pds_vdpa_set_driver_features,
	.get_driver_features	= pds_vdpa_get_driver_features,
	.set_config_cb		= pds_vdpa_set_config_cb,
	.get_vq_num_max		= pds_vdpa_get_vq_num_max,
	.get_device_id		= pds_vdpa_get_device_id,
	.get_vendor_id		= pds_vdpa_get_vendor_id,
	.get_status		= pds_vdpa_get_status,
	.set_status		= pds_vdpa_set_status,
	.reset			= pds_vdpa_reset,
	.get_config_size	= pds_vdpa_get_config_size,
	.get_config		= pds_vdpa_get_config,
	.set_config		= pds_vdpa_set_config,
};

static struct virtio_device_id pds_vdpa_id_table[] = {
	{VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
	{0},
};
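
/* Device creation is driven through the vdpa management API, e.g. with
 * the iproute2 "vdpa" tool (the address and names below are only
 * illustrative):
 *
 *   vdpa mgmtdev show
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:2b:00.1 mac 00:11:22:33:44:55 max_vqp 4
 *
 * which ends up in pds_vdpa_dev_add() via pds_vdpa_mgmt_dev_ops.
 */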

static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			    const struct vdpa_dev_set_config *add_config)
{
	struct pds_vdpa_aux *vdpa_aux;
	struct pds_vdpa_device *pdsv;
	struct vdpa_mgmt_dev *mgmt;
	u16 fw_max_vqs, vq_pairs;
	struct device *dma_dev;
	struct pci_dev *pdev;
	struct device *dev;
	u8 status;
	int err;
	int i;

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	dev = &vdpa_aux->padev->aux_dev.dev;
	mgmt = &vdpa_aux->vdpa_mdev;

	if (vdpa_aux->pdsv) {
		dev_warn(dev, "Multiple vDPA devices on a VF is not supported.\n");
		return -EOPNOTSUPP;
	}

	pdsv = vdpa_alloc_device(struct pds_vdpa_device, vdpa_dev,
				 dev, &pds_vdpa_ops, 1, 1, name, false);
	if (IS_ERR(pdsv)) {
		dev_err(dev, "Failed to allocate vDPA structure: %pe\n", pdsv);
		return PTR_ERR(pdsv);
	}

	vdpa_aux->pdsv = pdsv;
	pdsv->vdpa_aux = vdpa_aux;

	pdev = vdpa_aux->padev->vf_pdev;
	dma_dev = &pdev->dev;
	pdsv->vdpa_dev.dma_dev = dma_dev;

	status = pds_vdpa_get_status(&pdsv->vdpa_dev);
	if (status == 0xff) {
		dev_err(dev, "Broken PCI - status %#x\n", status);
		err = -ENXIO;
		goto err_unmap;
	}

	pdsv->supported_features = mgmt->supported_features;

	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
		u64 unsupp_features =
			add_config->device_features & ~pdsv->supported_features;

		if (unsupp_features) {
			dev_err(dev, "Unsupported features: %#llx\n", unsupp_features);
			err = -EOPNOTSUPP;
			goto err_unmap;
		}

		pdsv->supported_features = add_config->device_features;
	}

	err = pds_vdpa_cmd_reset(pdsv);
	if (err) {
		dev_err(dev, "Failed to reset hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	err = pds_vdpa_init_hw(pdsv);
	if (err) {
		dev_err(dev, "Failed to init hw: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	fw_max_vqs = le16_to_cpu(pdsv->vdpa_aux->ident.max_vqs);
	vq_pairs = fw_max_vqs / 2;

	/* Make sure we have the queues being requested */
	if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
		vq_pairs = add_config->net.max_vq_pairs;

	pdsv->num_vqs = 2 * vq_pairs;
	if (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
		pdsv->num_vqs++;

	if (pdsv->num_vqs > fw_max_vqs) {
		dev_err(dev, "%s: queue count requested %u greater than max %u\n",
			__func__, pdsv->num_vqs, fw_max_vqs);
		err = -ENOSPC;
		goto err_unmap;
	}

	if (pdsv->num_vqs != fw_max_vqs) {
		err = pds_vdpa_cmd_set_max_vq_pairs(pdsv, vq_pairs);
		if (err) {
			dev_err(dev, "Failed to set max_vq_pairs: %pe\n",
				ERR_PTR(err));
			goto err_unmap;
		}
	}

	/* Set a mac, either from the user config if provided
	 * or use the device's mac if not 00:..:00
	 * or set a random mac
	 */
	if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
		ether_addr_copy(pdsv->mac, add_config->net.mac);
	} else {
		struct virtio_net_config __iomem *vc;

		vc = pdsv->vdpa_aux->vd_mdev.device;
		memcpy_fromio(pdsv->mac, vc->mac, sizeof(pdsv->mac));
		if (is_zero_ether_addr(pdsv->mac) &&
		    (pdsv->supported_features & BIT_ULL(VIRTIO_NET_F_MAC))) {
			eth_random_addr(pdsv->mac);
			dev_info(dev, "setting random mac %pM\n", pdsv->mac);
		}
	}
	pds_vdpa_cmd_set_mac(pdsv, pdsv->mac);

	for (i = 0; i < pdsv->num_vqs; i++) {
		void __iomem *notify;

		notify = vp_modern_map_vq_notify(&pdsv->vdpa_aux->vd_mdev,
						 i, &pdsv->vqs[i].notify_pa);
		pds_vdpa_init_vqs_entry(pdsv, i, notify);
	}

	pdsv->vdpa_dev.mdev = &vdpa_aux->vdpa_mdev;

	err = pds_vdpa_register_event_handler(pdsv);
	if (err) {
		dev_err(dev, "Failed to register for PDS events: %pe\n", ERR_PTR(err));
		goto err_unmap;
	}

	/* We use the _vdpa_register_device() call rather than the
	 * vdpa_register_device() to avoid a deadlock because our
	 * dev_add() is called with the vdpa_dev_lock already set
	 * by vdpa_nl_cmd_dev_add_set_doit()
	 */
	err = _vdpa_register_device(&pdsv->vdpa_dev, pdsv->num_vqs);
	if (err) {
		dev_err(dev, "Failed to register to vDPA bus: %pe\n", ERR_PTR(err));
		goto err_unevent;
	}

	pds_vdpa_debugfs_add_vdpadev(vdpa_aux);

	return 0;

err_unevent:
	pds_vdpa_unregister_event_handler(pdsv);
err_unmap:
	put_device(&pdsv->vdpa_dev.dev);
	vdpa_aux->pdsv = NULL;
	return err;
}

static void pds_vdpa_dev_del(struct vdpa_mgmt_dev *mdev,
			     struct vdpa_device *vdpa_dev)
{
	struct pds_vdpa_device *pdsv = vdpa_to_pdsv(vdpa_dev);
	struct pds_vdpa_aux *vdpa_aux;

	pds_vdpa_unregister_event_handler(pdsv);

	vdpa_aux = container_of(mdev, struct pds_vdpa_aux, vdpa_mdev);
	_vdpa_unregister_device(vdpa_dev);

	pds_vdpa_cmd_reset(vdpa_aux->pdsv);
	pds_vdpa_debugfs_reset_vdpadev(vdpa_aux);

	vdpa_aux->pdsv = NULL;

	dev_info(&vdpa_aux->padev->aux_dev.dev, "Removed vdpa device\n");
}

static const struct vdpa_mgmtdev_ops pds_vdpa_mgmt_dev_ops = {
	.dev_add = pds_vdpa_dev_add,
	.dev_del = pds_vdpa_dev_del
};

int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux)
{
	union pds_core_adminq_cmd cmd = {
		.vdpa_ident.opcode = PDS_VDPA_CMD_IDENT,
		.vdpa_ident.vf_id = cpu_to_le16(vdpa_aux->vf_id),
	};
	union pds_core_adminq_comp comp = {};
	struct vdpa_mgmt_dev *mgmt;
	struct pci_dev *pf_pdev;
	struct device *pf_dev;
	struct pci_dev *pdev;
	dma_addr_t ident_pa;
	struct device *dev;
	u16 dev_intrs;
	u16 max_vqs;
	int err;

	dev = &vdpa_aux->padev->aux_dev.dev;
	pdev = vdpa_aux->padev->vf_pdev;
	mgmt = &vdpa_aux->vdpa_mdev;

	/* Get resource info through the PF's adminq.  It is a block of info,
	 * so we need to map some memory for PF to make available to the
	 * firmware for writing the data.
	 */
	pf_pdev = pci_physfn(vdpa_aux->padev->vf_pdev);
	pf_dev = &pf_pdev->dev;
	ident_pa = dma_map_single(pf_dev, &vdpa_aux->ident,
				  sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (dma_mapping_error(pf_dev, ident_pa)) {
		dev_err(dev, "Failed to map ident space\n");
		return -ENOMEM;
	}

	cmd.vdpa_ident.ident_pa = cpu_to_le64(ident_pa);
	cmd.vdpa_ident.len = cpu_to_le32(sizeof(vdpa_aux->ident));
	err = pds_client_adminq_cmd(vdpa_aux->padev, &cmd,
				    sizeof(cmd.vdpa_ident), &comp, 0);
	dma_unmap_single(pf_dev, ident_pa,
			 sizeof(vdpa_aux->ident), DMA_FROM_DEVICE);
	if (err) {
		dev_err(dev, "Failed to ident hw, status %d: %pe\n",
			comp.status, ERR_PTR(err));
		return err;
	}

	max_vqs = le16_to_cpu(vdpa_aux->ident.max_vqs);
	dev_intrs = pci_msix_vec_count(pdev);
	dev_dbg(dev, "ident.max_vqs %d dev_intrs %d\n", max_vqs, dev_intrs);

	max_vqs = min_t(u16, dev_intrs, max_vqs);
	mgmt->max_supported_vqs = min_t(u16, PDS_VDPA_MAX_QUEUES, max_vqs);
	vdpa_aux->nintrs = 0;

	mgmt->ops = &pds_vdpa_mgmt_dev_ops;
	mgmt->id_table = pds_vdpa_id_table;

	mgmt->supported_features = le64_to_cpu(vdpa_aux->ident.hw_features);

	/* advertise F_MAC even if the device doesn't */
	mgmt->supported_features |= BIT_ULL(VIRTIO_NET_F_MAC);

	mgmt->config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
	mgmt->config_attr_mask |= BIT_ULL(VDPA_ATTR_DEV_FEATURES);

	return 0;
}