/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

/* Provide an option to disable split event channels at load time as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);

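/* Illustrative usage (not part of this file): the parameter can be set at
 * load time, e.g. "modprobe xen-netback separate_tx_rx_irq=0", and since it
 * is exported with mode 0644 it also appears writable under
 * /sys/module/xen_netback/parameters/separate_tx_rx_irq.
 */
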
/*
 * This is the maximum number of slots a skb can use. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum number of slots a valid packet can use. Now this value is
 * defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported
 * by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests, otherwise it is the continuation of a
 * previous tx request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
#define PKT_PROT_LEN 128

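/* For reference: a maximal IPv4 header is 60 bytes and a maximal TCP header
 * is 60 bytes, so even worst-case IPv4/TCP headers (120 bytes) fit inside
 * the 128-byte linear area; the fixed 40-byte IPv6 header plus TCP likewise
 * fits unless long extension headers are in use.
 */
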
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

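/* While a tx packet is being assembled, a frag's page_offset field does not
 * yet describe real page data; it is borrowed to carry the pending ring
 * index of the grant copy backing that frag. xenvif_fill_frags() later
 * replaces it with the actual page, offset and size.
 */
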
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}

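/* pending_index() relies on MAX_PENDING_REQS being a power of two, so the
 * bitmask above is equivalent to taking the index modulo the ring size.
 * nr_pending_reqs() counts the slots currently in flight: entries that have
 * been consumed (pending_cons) but not yet produced back onto the free ring
 * (pending_prod).
 */
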
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}

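/* The loop above closes a race with the frontend: after finding too few
 * requests it advertises (via req_event) that it wants a notification once
 * more requests are posted, then re-reads req_prod. If the producer moved in
 * the meantime, the check is simply retried instead of waiting for a
 * notification that may never arrive.
 */
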
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, tending to give large frags their
	 * own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}

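/* Scratch state used while converting an skb into grant copy operations for
 * the rx ring: 'copy' and 'meta' point at per-vif arrays, the prod/cons
 * pairs track how many entries have been filled and consumed, and copy_off
 * plus copy_gref describe the frontend receive buffer currently being
 * filled.
 */
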
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type = XEN_NETIF_GSO_TYPE_NONE;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		else
			gso_type = XEN_NETIF_GSO_TYPE_NONE;

		if (*head && ((1 << gso_type) & vif->gso_mask))
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}

/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;
	int gso_size;

	old_meta_prod = npo->meta_prod;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso_size = skb_shinfo(skb)->gso_size;
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		gso_size = skb_shinfo(skb)->gso_size;
	} else {
		gso_type = XEN_NETIF_GSO_TYPE_NONE;
		gso_size = 0;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(vif, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(vif, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}

/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void xenvif_add_frag_responses(struct xenvif *vif, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

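/* Private per-skb scratch state kept in skb->cb while an skb travels from
 * xenvif_gop_skb() (which records how many meta slots it consumed) to the
 * response-building loop in xenvif_rx_action().
 */
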
struct skb_cb_overlay {
	int meta_slots_used;
};

void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}

static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	int ret;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	bool need_to_notify = false;
	bool ring_full = false;

	struct netrx_pending_operations npo = {
		.copy = vif->grant_copy_op,
		.meta = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		int max_slots_needed;
		int i;

		/* We need a cheap worst case estimate for the number of
		 * slots we'll use.
		 */
		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			ring_full = true;
			break;
		}

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
		BUG_ON(sco->meta_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	vif->rx_queue_stopped = !npo.copy_prod && ring_full;

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}

static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}

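/* Worked example (illustrative numbers): with credit_bytes = 100000 and the
 * next request sized 1500 bytes, max_burst = max(min(1500, 131072), 100000)
 * = 100000, so up to a full window's worth of credit is granted. A single
 * 128kB jumbo request instead raises max_burst to 131072, guaranteeing that
 * at least one such packet can always be sent.
 */
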
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xenvif_check_rx_xenvif(vif);
}

static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}

static int xenvif_count_requests(struct xenvif *vif,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			netdev_dbg(vif->dev,
				   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
				   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			netdev_dbg(vif->dev,
				   "Invalid tx request, slot size %u > remaining size %u\n",
				   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}

static struct page *xenvif_alloc_page(struct xenvif *vif,
				      u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
	if (!page)
		return NULL;
	vif->mmap_pages[pending_idx] = page;

	return page;
}

static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
					       struct sk_buff *skb,
					       struct xen_netif_tx_request *txp,
					       struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	unsigned int dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in xenvif_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;

		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * else branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(vif->pending_cons++);

				pending_idx = vif->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				vif->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		vif->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;

err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xenvif_idx_release(vif,
				   frag_get_pending_idx(&frags[shinfo->nr_frags]),
				   XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}

static int xenvif_tx_check_gop(struct xenvif *vif,
			       struct sk_buff *skb,
			       struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &vif->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = vif->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(vif, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xenvif_idx_release(vif, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}

static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xenvif_idx_release */
		get_page(vif->mmap_pages[pending_idx]);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}

static int xenvif_get_extras(struct xenvif *vif,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}

static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}

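/* Credit-based rate limiting in brief: each vif is allowed credit_bytes of
 * traffic per credit_usec window. Once the window has passed, the credit is
 * replenished via tx_add_credit(); if the pending request is still larger
 * than the remaining credit, a timer is armed so that tx_credit_callback()
 * retries transmission when the next window opens.
 */
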
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS) &&
	       (skb_queue_len(&vif->tx_queue) < budget)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			continue;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		/* XXX could copy straight to head */
		page = xenvif_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}

static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}

	return work_done;
}

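/* The tx path runs in two phases: xenvif_tx_build_gops() consumes requests
 * from the shared tx ring and turns them into a batch of grant copy
 * operations, and xenvif_tx_submit() runs after the hypercall to check the
 * copy results, rebuild the skbs and hand them to the network stack.
 * xenvif_tx_action() below sequences the two around gnttab_batch_copy().
 */
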
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_gops;
	int work_done;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	nr_gops = xenvif_tx_build_gops(vif, budget);

	if (nr_gops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	work_done = xenvif_tx_submit(vif);

	return work_done;
}

static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (vif->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &vif->pending_tx_info[pending_idx];

	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(vif, head));
	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = vif->pending_ring[idx];

		pending_tx_info = &vif->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(vif->pending_prod++);
		vif->pending_ring[index] = vif->pending_ring[info_idx];

		peek = vif->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(vif, peek));

	put_page(vif->mmap_pages[pending_idx]);
	vif->mmap_pages[pending_idx] = NULL;
}

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}

static inline int rx_work_todo(struct xenvif *vif)
{
	return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
		vif->rx_event;
}

static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS))
		return 1;

	return 0;
}

void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}

void xenvif_stop_queue(struct xenvif *vif)
{
	if (!vif->can_queue)
		return;

	netif_stop_queue(vif->dev);
}

static void xenvif_start_queue(struct xenvif *vif)
{
	if (xenvif_schedulable(vif))
		netif_wake_queue(vif->dev);
}

int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		vif->rx_event = false;

		if (skb_queue_empty(&vif->rx_queue) &&
		    netif_queue_stopped(vif->dev))
			xenvif_start_queue(vif);

		cond_resched();
	}

	/* Bin any remaining skbs */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");