/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time as
 * event channels are limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);
/*
 * This is the maximum slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
/*
 * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Now this value is defined
 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
 * all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
 * one or more merged tx requests, otherwise it is the continuation of
 * previous tx request.
 */
static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags);
static inline unsigned long idx_to_pfn(struct xenvif *vif,
				       u16 idx)
{
	return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
#define PKT_PROT_LEN 128
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
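/* Note: the mask above relies on MAX_PENDING_REQS being a power of two,
 * which makes "i & (MAX_PENDING_REQS-1)" equivalent to "i % MAX_PENDING_REQS".
 */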
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}
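/* pending_prod - pending_cons is the number of free entries in the
 * pending ring, so the value returned above is the number of requests
 * currently in flight.
 */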
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
	RING_IDX prod, cons;

	do {
		prod = vif->rx.sring->req_prod;
		cons = vif->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		vif->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (vif->rx.sring->req_prod != prod);

	return false;
}
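/* Setting req_event to prod + 1 asks the frontend to raise an event as
 * soon as it posts another request; re-reading req_prod afterwards closes
 * the race where requests arrived between the slot check and the
 * req_event update.
 */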
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small
	 * frags but do not needlessly split large buffers
	 * into multiple copies, and tend to give large frags
	 * their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
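/* Worked example, assuming MAX_BUFFER_OFFSET equals PAGE_SIZE (4096):
 * with 3000 bytes already in a non-head buffer, a 2000-byte chunk starts
 * a new buffer (it would overflow the current one but fits a fresh one),
 * whereas a 5000-byte chunk is split across buffers since it can never
 * fit in a single buffer anyway.
 */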
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct xenvif_rx_meta *meta;
	unsigned long bytes;
	int gso_type;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		bytes = PAGE_SIZE - offset;

		if (bytes > size)
			bytes = size;

		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		copy_gop->len = bytes;

		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
		copy_gop->source.offset = offset;

		copy_gop->dest.domid = vif->domid;
		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Next frame */
		if (offset == PAGE_SIZE && size) {
			BUG_ON(!PageCompound(page));
			page++;
			offset = 0;
		}

		/* Leave a gap for the GSO descriptor. */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		else
			gso_type = XEN_NETIF_GSO_TYPE_NONE;

		if (*head && ((1 << gso_type) & vif->gso_mask))
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
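/* For example, an skb whose data lands in three meta slots and which
 * needs a GSO descriptor consumes four ring slots in total: three data
 * slots plus one extra-info slot.
 */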
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;
	int gso_size;

	old_meta_prod = npo->meta_prod;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso_size = skb_shinfo(skb)->gso_size;
	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
		gso_size = skb_shinfo(skb)->gso_size;
	} else {
		gso_type = XEN_NETIF_GSO_TYPE_NONE;
		gso_size = 0;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(vif, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(vif, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void xenvif_add_frag_responses(struct xenvif *vif, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
struct skb_cb_overlay {
	int meta_slots_used;
};
void xenvif_kick_thread(struct xenvif *vif)
{
	wake_up(&vif->wq);
}
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	int ret;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		int i;

		/* We need a cheap worst case estimate for the number of
		 * slots we'll use.
		 */
		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
		BUG_ON(sco->meta_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}
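/* The RX path thus runs in two passes: the first loop above turns queued
 * skbs into grant-copy operations and meta slots, the copies are issued
 * in one batch via gnttab_batch_copy(), and the second loop writes the
 * corresponding responses onto the shared ring.
 */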
void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
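/* For instance, with credit_bytes = 100000, remaining_credit = 20000 and
 * a 60000-byte request at the head of the ring, max_burst is
 * max(min(60000, 131072), 100000) = 100000 and max_credit is 120000, so
 * remaining_credit becomes min(120000, 100000) = 100000.
 */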
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xenvif_check_rx_xenvif(vif);
}
static void xenvif_tx_err(struct xenvif *vif,
			  struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
				 struct xen_netif_tx_request *first,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(vif, first, cons + slots);
		return drop_err;
	}

	return slots;
}
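/* Three thresholds apply above: running past work_to_do or past
 * fatal_skb_slots is treated as a fatal frontend error, while a packet
 * using more than XEN_NETBK_LEGACY_SLOTS_MAX but fewer than
 * fatal_skb_slots slots is merely dropped after all its slots have been
 * consumed.
 */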
static struct page *xenvif_alloc_page(struct xenvif *vif,
				      u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
	if (!page)
		return NULL;
	vif->mmap_pages[pending_idx] = page;

	return page;
}
static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
					       struct sk_buff *skb,
					       struct xen_netif_tx_request *txp,
					       struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in xenvif_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			vif->pending_tx_info;

		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;
			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `while' loop.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(vif->pending_cons++);

				pending_idx = vif->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				vif->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		vif->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xenvif_idx_release(vif,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}
static int xenvif_tx_check_gop(struct xenvif *vif,
			       struct sk_buff *skb,
			       struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &vif->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = vif->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(vif, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xenvif_idx_release(vif, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &vif->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xenvif_idx_release */
		get_page(vif->mmap_pages[pending_idx]);
		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}
static int xenvif_get_extras(struct xenvif *vif,
			     struct xen_netif_extra_info *extras,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}
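/* Together with tx_add_credit() this implements a simple token-bucket
 * style rate limit: roughly credit_bytes may be sent per credit_usec
 * window, and an oversized request waits until the timer replenishes
 * the credit.
 */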
static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
		< MAX_PENDING_REQS) &&
	       (skb_queue_len(&vif->tx_queue) < budget)) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(vif);
			continue;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
		if (!work_to_do)
			break;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			break;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(vif, extras,
						       work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(vif);
			break;
		}

		index = pending_index(vif->pending_cons);
		pending_idx = vif->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				break;
			}
		}

		/* XXX could copy straight to head */
		page = xenvif_alloc_page(vif, pending_idx);
		if (!page) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&vif->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		vif->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		vif->pending_cons++;

		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			xenvif_tx_err(vif, &txreq, idx);
			break;
		}
		gop = request_gop;

		__skb_queue_tail(&vif->tx_queue, skb);

		vif->tx.req_cons = idx;

		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
			break;
	}

	return gop - vif->tx_copy_ops;
}
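/* xenvif_tx_build_gops() only gathers grant-copy operations and queues
 * the corresponding skbs on vif->tx_queue; no data has been copied yet
 * when it returns the number of operations built.
 */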
static int xenvif_tx_submit(struct xenvif *vif)
{
	struct gnttab_copy *gop = vif->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		txp = &vif->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(vif, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(vif, skb);

		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		work_done++;

		netif_receive_skb(skb);
	}

	return work_done;
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_gops;
	int work_done;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	nr_gops = xenvif_tx_build_gops(vif, budget);

	if (nr_gops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);

	work_done = xenvif_tx_submit(vif);

	return work_done;
}
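/* The TX path thus mirrors the RX path: build the grant-copy batch,
 * issue it with gnttab_batch_copy(), then let xenvif_tx_submit() check
 * the results and hand the completed skbs to the network stack.
 */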
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (vif->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &vif->pending_tx_info[pending_idx];

	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(vif, head));
	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = vif->pending_ring[idx];

		pending_tx_info = &vif->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(vif->pending_prod++);
		vif->pending_ring[index] = vif->pending_ring[info_idx];

		peek = vif->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(vif, peek));

	put_page(vif->mmap_pages[pending_idx]);
	vif->mmap_pages[pending_idx] = NULL;
}
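/* Because coalesced tx requests share one pending slot chain, the loop
 * above walks every entry up to the next head, sends a tx response for
 * each merged request and returns the slots to the pending ring.
 */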
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
static inline int rx_work_todo(struct xenvif *vif)
{
	return !skb_queue_empty(&vif->rx_queue) &&
		xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
	     < MAX_PENDING_REQS))
		return 1;

	return 0;
}
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}
int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}
void xenvif_stop_queue(struct xenvif *vif)
{
	if (!vif->can_queue)
		return;

	netif_stop_queue(vif->dev);
}
static void xenvif_start_queue(struct xenvif *vif)
{
	if (xenvif_schedulable(vif))
		netif_wake_queue(vif->dev);
}
int xenvif_kthread(void *data)
{
	struct xenvif *vif = data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		if (skb_queue_empty(&vif->rx_queue) &&
		    netif_queue_stopped(vif->dev))
			xenvif_start_queue(vif);

		cond_resched();
	}

	/* Bin any remaining skbs */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
		kfree_skb(skb);

	return 0;
}
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");