/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
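/* n->vlans is a bitmap with one bit per possible VLAN id (MAX_VLAN bits,
 * i.e. MAX_VLAN >> 3 bytes), tested below as
 * n->vlans[vid >> 5] & (1U << (vid & 0x1f)). */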
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
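/* For example, endof(struct virtio_net_config, status) is the offset of
 * 'status' plus sizeof(status): the number of config-space bytes needed
 * for the status field to be addressable. */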
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
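/* feature_sizes is scanned by virtio_net_set_config_size() below: config
 * space is sized to the largest 'end' whose feature bit is set, and the
 * empty terminating entry (flags == 0) stops the scan. */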
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
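/* Virtqueues are laid out as rx0, tx0, rx1, tx1, ... with the control vq
 * last, so halving a virtqueue index recovers the queue-pair index. */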
static int vq2q(int queue_index)
{
    return queue_index / 2;
}
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
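/* Start or stop the vhost backend so that it matches the device state:
 * vhost runs only while virtio_net_started() holds for a tap peer that
 * actually has a vhost net instance; otherwise fall back to userspace. */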
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }
    if (!!n->vhost_started == virtio_net_started(n, status) &&
        !nc->peer->link_down) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                qemu_mod_timer(q->tx_timer,
                               qemu_get_clock_ns(vm_clock) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                qemu_del_timer(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}
static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}
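/* VIRTIO_NET_CTRL_MAC_TABLE_SET carries two virtio_net_ctrl_mac blocks:
 * the unicast list followed by the multicast list, each a 32-bit entry
 * count and then count * ETH_ALEN bytes of addresses.  A list that does
 * not fit in mac_table only sets the matching overflow flag. */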
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        /* Append the multicast entries after the unicast ones; copying to
         * the start of macs[] would clobber the unicast list. */
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}
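/* VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET: the guest announces how many queue
 * pairs it will actually drive; the value must stay within the spec
 * limits and within the max_queues this device was created with. */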
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}
/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
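/* Receive path.  With mergeable rx buffers a packet may span several
 * virtqueue elements: the first element also receives the virtio-net
 * header, and the number of elements used is patched into num_buffers
 * (via the iovec saved in mhdr_sg) once the whole packet is copied. */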
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
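/* qemu_sendv_packet_async() returns 0 when the packet has been queued
 * rather than sent; the element is then parked in q->async_tx and
 * completed here once the backend calls back, after which the tx queue
 * is flushed again. */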
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
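/* With tx=timer, a guest kick normally just (re)arms the timer so that
 * packets are batched; a kick arriving while tx_waiting is already set
 * flushes immediately.  With tx=bh the flush is deferred to a bottom half. */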
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        qemu_mod_timer(q->tx_timer,
                       qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
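/* Re-lay out the virtqueues for a new queue count: delete everything after
 * the first rx/tx pair, re-add one rx/tx pair per additional active queue,
 * then put the control vq back at the end. */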
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
                                                   virtio_net_tx_timer,
                                                   &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }
}
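/* Load side of the migration stream written above; version_id checks
 * against VIRTIO_NET_VM_VERSION decide which optional fields are present. */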
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            /* Discard entries we cannot hold; the scratch buffer must cover
             * the full in_use * ETH_ALEN bytes that will be read. */
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_queue(n->nic)->peer,
                        (vdev->guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (vdev->guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (vdev->guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (vdev->guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}
static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
static int virtio_net_device_init(VirtIODevice *vdev)
{
    int i;

    DeviceState *qdev = DEVICE(vdev);
    VirtIONet *n = VIRTIO_NET(vdev);

    virtio_init(VIRTIO_DEVICE(n), "virtio-net", VIRTIO_ID_NET,
                n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
        && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
                                               &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                          object_get_typename(OBJECT(qdev)), qdev->id, n);
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    register_savevm(qdev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, qdev, "/ethernet-phy@0");

    return 0;
}
static int virtio_net_device_exit(DeviceState *qdev)
{
    VirtIONet *n = VIRTIO_NET(qdev);
    VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            qemu_del_timer(q->tx_timer);
            qemu_free_timer(q->tx_timer);
        } else {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);

    return 0;
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}
static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->exit = virtio_net_device_exit;
    dc->props = virtio_net_properties;
    vdc->init = virtio_net_device_init;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)