/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
struct pending_tx_info {
	struct xen_netif_tx_request req;
	struct xenvif *vif;
};
typedef unsigned int pending_ring_idx_t;
struct netbk_rx_meta {
	int id;
	int size;
	int gso_size;
};

#define MAX_PENDING_REQS 256
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};
struct xen_netbk {
	wait_queue_head_t wq;
	struct task_struct *task;

	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];

	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
void xen_netbk_add_xenvif(struct xenvif *vif)
{
	int i;
	int min_netfront_count;
	int min_group = 0;
	struct xen_netbk *netbk;

	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
	for (i = 0; i < xen_netbk_group_nr; i++) {
		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
		if (netfront_count < min_netfront_count) {
			min_group = i;
			min_netfront_count = netfront_count;
		}
	}

	netbk = &xen_netbk[min_group];

	vif->netbk = netbk;
	atomic_inc(&netbk->netfront_count);
}
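/*
 * Note that the group chosen above is simply the one with the fewest
 * attached frontends (netfront_count), so vifs are balanced across
 * the per-group netback threads created in netback_init().
 */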
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	vif->netbk = NULL;
	atomic_dec(&netbk->netfront_count);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags);

static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       u16 idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}
static int get_page_ext(struct page *pg,
			unsigned int *pgroup, unsigned int *pidx)
{
	union page_ext ext = { .mapping = pg->mapping };
	struct xen_netbk *netbk;
	unsigned int group, idx;

	group = ext.e.group - 1;

	if (group < 0 || group >= xen_netbk_group_nr)
		return 0;

	netbk = &xen_netbk[group];

	idx = ext.e.idx;

	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
		return 0;

	if (netbk->mmap_pages[idx] != pg)
		return 0;

	*pgroup = group;
	*pidx = idx;

	return 1;
}
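/*
 * set_page_ext() stores the group biased by one, so a page whose
 * mapping field is zero (the common case for pages netback does not
 * own) can never be mistaken for group 0.  get_page_ext() undoes the
 * bias and only claims the page if the group and index are in range
 * and mmap_pages[idx] really is this page.
 */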
/*
 * This is the amount of packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
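/*
 * For example, with the usual header sizes (14-byte Ethernet header,
 * 4-byte VLAN tag, 20-byte IPv4 header plus up to 40 bytes of IP
 * options, 20-byte TCP header plus up to 40 bytes of TCP options)
 * PKT_PROT_LEN works out to 138 bytes of copied linear data.
 */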
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}
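/*
 * MAX_PENDING_REQS is a power of two, so pending_index() reduces a
 * free-running counter modulo the ring size with a mask (for example
 * pending_index(257) == 1).  pending_prod counts slots released back
 * to the pending ring and pending_cons counts slots taken from it,
 * so pending_prod - pending_cons is the number of free slots and
 * nr_pending_reqs() is the number currently in use.
 */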
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}
static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */
	return xen_netbk_rx_ring_full(vif);
}
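/*
 * The queue is only stopped after re-checking fullness: req_event is
 * set so the frontend will raise an event once enough requests have
 * been posted, the barrier orders that write against the re-read of
 * the ring pointers, and the second xen_netbk_rx_ring_full() check
 * closes the race where the ring drained between the first check and
 * setting req_event.
 */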
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer
	 *   empty. Strictly speaking this is already covered
	 *   by (ii) but is explicitly checked because
	 *   netfront relies on the first buffer being
	 *   non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small frags but do
	 * not needlessly split large buffers into multiple copies,
	 * tending to give large frags their own buffers as before.
	 */
	if ((offset + size > MAX_BUFFER_OFFSET) &&
	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
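/*
 * For example, with MAX_BUFFER_OFFSET == 4096: a 2000-byte fragment
 * arriving when the current buffer already holds 3000 bytes starts a
 * new buffer (it would overflow, it fits in an empty buffer, and the
 * buffer is neither empty nor the head), whereas a 6000-byte fragment
 * is not given a fresh buffer because it cannot fit in a single one
 * and is split across buffers instead.
 */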
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(
			offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long bytes;
		while (size > 0) {
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			if (start_new_rx_buffer(copy_off, size, 0)) {
				count++;
				copy_off = 0;
			}

			bytes = size;
			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			size -= bytes;
		}
	}
	return count;
}
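/*
 * Example: a 100-byte linear header followed by one 3000-byte
 * fragment needs a single slot, because the fragment still fits in
 * the buffer opened for the header (copy_off 100 + 3000 <= 4096 when
 * MAX_BUFFER_OFFSET is 4096); a second 3000-byte fragment would then
 * force a new buffer and a second slot.
 */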
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct netbk_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE);

	meta = npo->meta + npo->meta_prod - 1;

	while (size > 0) {
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		bytes = size;
		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
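/*
 * For a foreign page (one that still backs another guest's TX
 * request) the copy source is expressed as that guest's grant
 * reference, so the hypervisor copies straight from the sending
 * guest into the receiving guest's buffer.  Local pages are
 * addressed by MFN with DOMID_SELF instead.
 */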
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to netbk_gop_skb.  Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done.  Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
struct skb_cb_overlay {
	int meta_slots_used;
};
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy  = netbk->grant_copy_op,
		.meta  = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
					npo.copy_prod);
	BUG_ON(ret != 0);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		npo.meta_cons += sco->meta_slots_used;
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}
static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}
static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
	}
}
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
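/*
 * Example: with credit_bytes == 1000000 and remaining_credit ==
 * 200000, a pending 2000-byte request gives
 *   max_burst  = max(min(2000, 131072), 1000000) = 1000000
 *   max_credit = 200000 + 1000000               = 1200000
 * so remaining_credit becomes min(1200000, 1000000) == 1000000:
 * the refill is clamped to the burst limit rather than accumulating
 * without bound.
 */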
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}
static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons >= end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
}
static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int frags = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (frags >= work_to_do) {
			netdev_dbg(vif->dev, "Need more frags\n");
			return -frags;
		}

		if (unlikely(frags >= MAX_SKB_FRAGS)) {
			netdev_dbg(vif->dev, "Too many frags\n");
			return -frags;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
		       sizeof(*txp));
		if (txp->size > first->size) {
			netdev_dbg(vif->dev, "Frags galore\n");
			return -frags;
		}

		first->size -= txp->size;
		frags++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			return -frags;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);

	return frags;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 struct sk_buff *skb,
					 u16 pending_idx)
{
	struct page *page;
	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	int start;
	int i;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < shinfo->nr_frags; i++, txp++) {
		struct page *page;
		pending_ring_idx_t index;
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		index = pending_index(netbk->pending_cons++);
		pending_idx = netbk->pending_ring[index];
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page)
			return NULL;

		gop->source.u.ref = txp->gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txp->offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txp->offset;

		gop->len = txp->size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
		pending_tx_info[pending_idx].vif = vif;
		frag_set_pending_idx(&frags[i], pending_idx);
	}

	return gop;
}
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
	struct xenvif *vif = pending_tx_info[pending_idx].vif;
	struct xen_netif_tx_request *txp;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i, err, start;

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err)) {
		pending_ring_idx_t index;
		index = pending_index(netbk->pending_prod++);
		txp = &pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		netbk->pending_ring[index] = pending_idx;
	}

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t index;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = (++gop)->status;
		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		txp = &netbk->pending_tx_info[pending_idx].req;
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = pending_idx;

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx);
	}
}
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_dbg(vif->dev, "Missing extra info\n");
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_dbg(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL.  However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame.  In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	unsigned long now = jiffies;
	unsigned long next_credit =
		vif->credit_timeout.expires +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq(now, next_credit)) {
		vif->credit_timeout.expires = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);

		return true;
	}

	return false;
}
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
		!list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		if (!vif)
			continue;

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do)
			continue;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			continue;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0)) {
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
		if (unlikely(ret < 0)) {
			netbk_tx_err(vif, &txreq, idx - ret);
			continue;
		}
		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_dbg(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < MAX_SKB_FRAGS) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				kfree_skb(skb);
				netbk_tx_err(vif, &txreq, idx);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		__skb_queue_tail(&netbk->tx_queue, skb);

		netbk->pending_cons++;

		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}
/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;
	int ret;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;
	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
					netbk->tx_copy_ops, nr_gops);
	BUG_ON(ret);

	xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;

	make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);

	index = pending_index(netbk->pending_prod++);
	netbk->pending_ring[index] = pending_idx;

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8       st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
					     u16      id,
					     s8       st,
					     u16      offset,
					     u16      size,
					     u16      flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}
static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;
	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
				rx_work_todo(netbk) ||
				tx_work_todo(netbk) ||
				kthread_should_stop());

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}
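/*
 * One kthread is created per xen_netbk group in netback_init() and
 * bound to its CPU; it sleeps on netbk->wq until xen_netbk_kick_thread()
 * (or the net_timer) wakes it, then drains the guest-RX queue and the
 * guest-TX schedule list before sleeping again.
 */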
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}
static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk) {
		printk(KERN_ALERT "%s: out of memory\n", __func__);
		return -ENOMEM;
	}

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		for (i = 0; i < MAX_PENDING_REQS; i++) {
			if (netbk->mmap_pages[i])
				__free_page(netbk->mmap_pages[i]);
		}
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;
}

module_init(netback_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");