/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");
/*
 * This is the maximum number of slots a skb can have. If a guest sends a
 * skb which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
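
/* Illustrative sketch of the split above (not from the original source):
 * with XEN_NETBACK_TX_COPY_LEN == 128, a 1500-byte first slot is handled
 * as a 128-byte grant copy into the skb's linear area plus one grant map
 * of the same slot; the remaining 1372 bytes become frag 0. A 100-byte
 * slot is copied in full and no map operation is needed.
 */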
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)
/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}
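
/* Sketch of how the lookup above works: each queue embeds an array
 * pending_tx_info[MAX_PENDING_REQS], and ubuf->desc holds the index of
 * the entry containing this ubuf. The first container_of() recovers that
 * pending_tx_info entry from its callback_struct member; stepping back
 * pending_idx entries lands on pending_tx_info[0], whose address the
 * second container_of() converts into the enclosing xenvif_queue.
 */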
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
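
/* The masking above relies on MAX_PENDING_REQS being a power of two, so
 * that i & (MAX_PENDING_REQS-1) == i % MAX_PENDING_REQS. Worked example
 * (illustrative, assuming MAX_PENDING_REQS == 256 as for a 4KiB ring
 * page): pending_index(260) == 260 & 255 == 4, i.e. the ring index wraps
 * without a division.
 */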
bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}
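
/* The loop above is the usual Xen shared-ring "check, arm event,
 * re-check" pattern: if enough requests are already posted we are done;
 * otherwise req_event is set one past the current producer so the
 * frontend will notify us when it posts more, and prod is re-read after
 * the barrier. If the frontend raced with us and already moved prod, we
 * loop and look again rather than wait for a notification that has
 * effectively been consumed.
 */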
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}
static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
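
/* Bookkeeping sketch for netrx_pending_operations: copy_prod/meta_prod
 * count the grant-copy operations and meta slots queued up by
 * xenvif_gop_skb(), while copy_cons/meta_cons track how far
 * xenvif_check_gop() and the response path have consumed them.
 * copy_off/copy_gref describe the current write position inside the
 * guest buffer being filled.
 */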
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		struct xen_page_foreign *foreign;

		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (npo->copy_off == MAX_BUFFER_OFFSET)
			meta = get_next_rx_buffer(queue, npo);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		foreign = xen_page_foreign(page);
		if (foreign) {
			copy_gop->source.domid = foreign->domid;
			copy_gop->source.u.ref = foreign->gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn =
				virt_to_mfn(page_address(page));
		}
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = queue->vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_is_gso(skb)) {
			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		}

		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
			queue->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = queue->grant_copy_op,
		.meta  = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		queue->last_rx_time = jiffies;

		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}
static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}
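
/* Worked example of the arithmetic above (illustrative values only):
 * with credit_bytes == 100000 and remaining_credit == 20000, a
 * head-of-ring request of 60000 bytes gives
 * max_burst = max(min(60000, 131072), 100000) = 100000 and
 * max_credit = 20000 + 100000 = 120000, so remaining_credit becomes
 * min(120000, 100000) = 100000. The explicit wrap check clamps
 * max_credit to ULONG_MAX instead of letting the addition wrap past
 * zero.
 */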
void xenvif_tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&queue->tx, cons++);
	} while (1);
	queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, cons + slots);
		return drop_err;
	}

	return slots;
}
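
/* Return-value sketch for xenvif_count_requests(): 0 for a single-slot
 * packet (no XEN_NETTXF_more_data), a positive count of extra slots when
 * the chain is well formed, and a negative errno when either the
 * frontend did something fatal (-ENODATA/-E2BIG/-EINVAL) or an
 * oversized-but-tolerated packet had all its slots consumed and then
 * dropped via xenvif_tx_err().
 */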
struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots, frag_overflow = 0;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
		shinfo->nr_frags = MAX_SKB_FRAGS;
	}
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		struct sk_buff *nskb = xenvif_alloc_skb(0);
		if (unlikely(nskb == NULL)) {
			if (net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Can't allocate the frag_list skb.\n");
			return NULL;
		}

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			queue->tx.req_cons = ++cons;
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		queue->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data =
			(unsigned long)queue;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}
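
/* Timing sketch (illustrative values only): with credit_usec == 500000
 * the replenish window is 500 ms. A 70000-byte packet arriving against
 * remaining_credit == 50000 inside the window arms credit_timeout for
 * the end of the window and returns true, throttling the queue; once
 * the timer fires, xenvif_tx_credit_callback() refills the allowance
 * via tx_add_credit() and reschedules the NAPI instance.
 */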
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_mfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data);

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
			       sizeof(txreq));
		}

		queue->pending_cons++;

		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(queue, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}
/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	wake_up(&queue->dealloc_wq);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
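
/* Sketch of the chaining walked above: for an skb whose frags span
 * several pending slots, xenvif_fill_frags() links the per-slot
 * ubuf_info structures through their ctx pointers. The callback
 * therefore follows ubuf->ctx until it hits NULL, pushing every
 * pending_idx it visits onto the dealloc ring, and wakes the dealloc
 * kthread once at the end rather than per slot.
 */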
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}
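
/* Note on the overloaded status field above: a single s16 carries either
 * the byte count of the response (when st >= 0) or a negative
 * XEN_NETIF_RSP_* error code, which is why the size written first is
 * overwritten whenever st is negative.
 */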
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}
static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}
static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}
void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}
static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled
		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}
static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled
		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
}
static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, by default */
	xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");