/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/*
 * This is the maximum number of slots a skb can have. If a guest sends a
 * skb which exceeds this limit it is considered malicious.
 */
#define MAX_SKB_SLOTS_DEFAULT 20
static unsigned int max_skb_slots = MAX_SKB_SLOTS_DEFAULT;
module_param(max_skb_slots, uint, 0444);

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)
struct pending_tx_info {
	struct xen_netif_tx_request req; /* coalesced tx request */
	struct xenvif *vif;
	pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
				  * if it is the head of one or more
				  * coalesced tx reqs
				  */
};

struct netbk_rx_meta {
	int id;
	int size;
	int gso_size;
};

#define MAX_PENDING_REQS 256

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
#define MAX_BUFFER_OFFSET PAGE_SIZE

/* extra field used in struct page */
union page_ext {
	struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH   8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
		unsigned int group:GROUP_WIDTH;
		unsigned int idx:IDX_WIDTH;
#else
		unsigned int group, idx;
#endif
	} e;
	void *mapping;
};
struct xen_netbk {
	wait_queue_head_t wq;
	struct task_struct *task;

	struct sk_buff_head rx_queue;
	struct sk_buff_head tx_queue;

	struct timer_list net_timer;

	struct page *mmap_pages[MAX_PENDING_REQS];

	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	struct list_head net_schedule_list;

	/* Protect the net_schedule_list in netif. */
	spinlock_t net_schedule_list_lock;

	atomic_t netfront_count;

	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	/* Coalescing tx requests before copying makes the number of grant
	 * copy ops greater than or equal to the number of slots required.
	 * In the worst case a tx request consumes 2 gnttab_copy ops.
	 */
	struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];

	u16 pending_ring[MAX_PENDING_REQS];

	/*
	 * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
	 * head/fragment page uses 2 copy operations because it
	 * straddles two buffers in the frontend.
	 */
	struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
	struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};

static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
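
/*
 * Illustrative arithmetic for the "2 *" sizing above (an editorial example,
 * not from the original code): a single coalesced tx slot that begins at
 * byte 3000 of a 4096-byte destination page needs one gnttab_copy of 1096
 * bytes to finish that page and a second copy for the remainder on the next
 * page. Likewise an rx head/fragment that starts at copy_off 3000 of a
 * frontend buffer is split into two copies. Hence both arrays are sized at
 * twice the number of slots they may have to serve.
 */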
/*
 * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
 * of one or more merged tx requests; otherwise it is the continuation of the
 * previous tx request.
 */
static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
{
	return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
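
/*
 * Illustrative, compiled-out sketch (not part of the driver) of how a
 * consumer walks one run of coalesced slots: start at the head slot and
 * keep advancing until the next entry in pending_ring is itself a head.
 * The same pattern appears later in xen_netbk_tx_check_gop() and
 * xen_netbk_idx_release().
 */
#if 0
static unsigned int example_coalesced_run_length(struct xen_netbk *netbk,
						 pending_ring_idx_t head)
{
	unsigned int n = 0;
	u16 peek;

	do {
		n++;	/* count this slot of the run */
		peek = netbk->pending_ring[pending_index(++head)];
	} while (!pending_tx_is_head(netbk, peek));

	return n;
}
#endif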
void xen_netbk_add_xenvif(struct xenvif *vif)
{
	int i;
	int min_netfront_count;
	int min_group = 0;
	struct xen_netbk *netbk;

	min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
	for (i = 0; i < xen_netbk_group_nr; i++) {
		int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
		if (netfront_count < min_netfront_count) {
			min_group = i;
			min_netfront_count = netfront_count;
		}
	}

	netbk = &xen_netbk[min_group];
	vif->netbk = netbk;
	atomic_inc(&netbk->netfront_count);
}
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	atomic_dec(&netbk->netfront_count);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status);
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
				       u16 idx)
{
	return page_to_pfn(netbk->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
				unsigned int idx)
{
	unsigned int group = netbk - xen_netbk;
	union page_ext ext = { .e = { .group = group + 1, .idx = idx } };

	BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
	pg->mapping = ext.mapping;
}
static int get_page_ext(struct page *pg,
			unsigned int *pgroup, unsigned int *pidx)
{
	union page_ext ext = { .mapping = pg->mapping };
	struct xen_netbk *netbk;
	unsigned int group, idx;

	group = ext.e.group - 1;

	if (group < 0 || group >= xen_netbk_group_nr)
		return 0;

	netbk = &xen_netbk[group];

	idx = ext.e.idx;

	if ((idx < 0) || (idx >= MAX_PENDING_REQS))
		return 0;

	if (netbk->mmap_pages[idx] != pg)
		return 0;

	*pgroup = group;
	*pidx = idx;

	return 1;
}
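
/*
 * Illustrative, compiled-out sketch (not part of the driver) of the
 * group/idx round trip through page->mapping. The group is stored biased
 * by +1 so that a page whose mapping was never set (i.e. 0) can never be
 * mistaken for group 0. Note that get_page_ext() also insists that the
 * page is currently installed in mmap_pages[idx], so the sketch installs
 * it first, as xen_netbk_alloc_page() does.
 */
#if 0
static bool example_page_ext_roundtrip(struct page *pg,
				       struct xen_netbk *netbk,
				       unsigned int idx)
{
	unsigned int group, got_idx;

	set_page_ext(pg, netbk, idx);
	netbk->mmap_pages[idx] = pg;	/* required for get_page_ext() */

	if (!get_page_ext(pg, &group, &got_idx))
		return false;
	return group == (unsigned int)(netbk - xen_netbk) && got_idx == idx;
}
#endif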
/*
 * This is the amount of packet we copy rather than map, so that the
 * guest can't fiddle with the contents of the headers while we do
 * packet processing on them (netfilter, routing, etc).
 */
#define PKT_PROT_LEN    (ETH_HLEN + \
			 VLAN_HLEN + \
			 sizeof(struct iphdr) + MAX_IPOPTLEN + \
			 sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
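
/*
 * Illustrative arithmetic (not from the original code): with the usual
 * sizes ETH_HLEN = 14, VLAN_HLEN = 4, sizeof(struct iphdr) = 20,
 * MAX_IPOPTLEN = 40, sizeof(struct tcphdr) = 20 and MAX_TCP_OPTION_SPACE
 * = 40, PKT_PROT_LEN works out to 138 bytes of linear data, enough to
 * cover any sane Ethernet/IPv4/TCP header combination.
 */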
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}

static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
	return MAX_PENDING_REQS -
		netbk->pending_prod + netbk->pending_cons;
}
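
/*
 * Illustrative, compiled-out sketch (not part of the driver): the bitmask
 * in pending_index() is only equivalent to a modulo because
 * MAX_PENDING_REQS is a power of two. For example, with MAX_PENDING_REQS
 * == 256 an index of 260 maps to slot 4, and the unsigned arithmetic in
 * nr_pending_reqs() stays correct even after the producer/consumer
 * counters wrap.
 */
#if 0
static inline pending_ring_idx_t example_pending_index(unsigned i)
{
	BUILD_BUG_ON(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1));
	return i & (MAX_PENDING_REQS - 1); /* same as i % MAX_PENDING_REQS */
}
#endif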
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
	wake_up(&netbk->wq);
}
static int max_required_rx_slots(struct xenvif *vif)
{
	int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);

	/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
	if (vif->can_sg || vif->gso || vif->gso_prefix)
		max += MAX_SKB_FRAGS + 1; /* extra_info + frags */

	return max;
}
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
	RING_IDX peek   = vif->rx_req_cons_peek;
	RING_IDX needed = max_required_rx_slots(vif);

	return ((vif->rx.sring->req_prod - peek) < needed) ||
	       ((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
	if (!xen_netbk_rx_ring_full(vif))
		return 0;

	vif->rx.sring->req_event = vif->rx_req_cons_peek +
		max_required_rx_slots(vif);
	mb(); /* request notification /then/ check the queue */

	return xen_netbk_rx_ring_full(vif);
}
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
	/* simple case: we have completely filled the current buffer. */
	if (offset == MAX_BUFFER_OFFSET)
		return true;

	/*
	 * complex case: start a fresh buffer if the current frag
	 * would overflow the current buffer but only if:
	 *     (i)   this frag would fit completely in the next buffer
	 * and (ii)  there is already some data in the current buffer
	 * and (iii) this is not the head buffer.
	 *
	 * Where:
	 * - (i) stops us splitting a frag into two copies
	 *   unless the frag is too large for a single buffer.
	 * - (ii) stops us from leaving a buffer pointlessly empty.
	 * - (iii) stops us leaving the first buffer empty. Strictly
	 *   speaking this is already covered by (ii) but it is
	 *   explicitly checked because netfront relies on the first
	 *   buffer being non-empty and can crash otherwise.
	 *
	 * This means we will effectively linearise small frags but do
	 * not needlessly split large buffers into multiple copies;
	 * large frags tend to get their own buffers, as before.
	 */
	BUG_ON(size > MAX_BUFFER_OFFSET);
	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
		return true;

	return false;
}
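
/*
 * Worked examples of the rules above (editorial illustration, assuming
 * MAX_BUFFER_OFFSET == PAGE_SIZE == 4096):
 *   start_new_rx_buffer(4096,  512, 0) -> true   (buffer completely full)
 *   start_new_rx_buffer(3000, 2000, 0) -> true   (frag fits wholly in the next buffer)
 *   start_new_rx_buffer(3000, 2000, 1) -> false  (never leave the head buffer empty)
 *   start_new_rx_buffer(   0, 2000, 0) -> false  (current buffer is still empty, use it)
 */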
/*
 * Figure out how many ring slots we're going to need to send @skb to
 * the guest. This function is essentially a dry run of
 * netbk_gop_frag_copy.
 */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
	unsigned int count;
	int i, copy_off;

	count = DIV_ROUND_UP(offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);

	copy_off = skb_headlen(skb) % PAGE_SIZE;

	if (skb_shinfo(skb)->gso_size)
		count++;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
		unsigned long bytes;

		while (size > 0) {
			BUG_ON(copy_off > MAX_BUFFER_OFFSET);

			if (start_new_rx_buffer(copy_off, size, 0)) {
				count++;
				copy_off = 0;
			}

			bytes = size;
			if (copy_off + bytes > MAX_BUFFER_OFFSET)
				bytes = MAX_BUFFER_OFFSET - copy_off;

			copy_off += bytes;
			size -= bytes;
		}
	}
	return count;
}
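
/*
 * Illustrative arithmetic for the head accounting above (not from the
 * original code): if skb->data starts 100 bytes into a page and
 * skb_headlen() is 5000, the linear area needs DIV_ROUND_UP(100 + 5000,
 * 4096) = 2 slots and the first fragment starts copying at copy_off =
 * 5000 % 4096 = 904 within the last of those buffers (PAGE_SIZE == 4096
 * assumed).
 */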
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct netbk_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
						struct netrx_pending_operations *npo)
{
	struct netbk_rx_meta *meta;
	struct xen_netif_rx_request *req;

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req->id;

	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
				struct netrx_pending_operations *npo,
				struct page *page, unsigned long size,
				unsigned long offset, int *head)
{
	struct gnttab_copy *copy_gop;
	struct netbk_rx_meta *meta;
	/*
	 * These variables are used iff get_page_ext returns true,
	 * in which case they are guaranteed to be initialized.
	 */
	unsigned int uninitialized_var(group), uninitialized_var(idx);
	int foreign = get_page_ext(page, &group, &idx);
	unsigned long bytes;

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE);

	meta = npo->meta + npo->meta_prod - 1;

	while (size > 0) {
		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

		if (start_new_rx_buffer(npo->copy_off, size, *head)) {
			/*
			 * Netfront requires there to be some data in the head
			 * buffer.
			 */
			BUG_ON(*head);

			meta = get_next_rx_buffer(vif, npo);
		}

		bytes = size;
		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
			bytes = MAX_BUFFER_OFFSET - npo->copy_off;

		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (foreign) {
			struct xen_netbk *netbk = &xen_netbk[group];
			struct pending_tx_info *src_pend;

			src_pend = &netbk->pending_tx_info[idx];

			copy_gop->source.domid = src_pend->vif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			void *vaddr = page_address(page);
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = vif->domid;

		copy_gop->dest.offset = npo->copy_off;
		copy_gop->dest.u.ref = npo->copy_gref;
		copy_gop->len = bytes;

		npo->copy_off += bytes;
		meta->size += bytes;

		offset += bytes;
		size -= bytes;

		/* Leave a gap for the GSO descriptor. */
		if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
			vif->rx.req_cons++;

		*head = 0; /* There must be something in this buffer now. */
	}
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int netbk_gop_skb(struct sk_buff *skb,
			 struct netrx_pending_operations *npo)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request *req;
	struct netbk_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;

	old_meta_prod = npo->meta_prod;

	/* Set up a GSO prefix descriptor, if necessary */
	if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req->id;
	}

	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
	meta = npo->meta + npo->meta_prod++;

	if (!vif->gso_prefix)
		meta->gso_size = skb_shinfo(skb)->gso_size;
	else
		meta->gso_size = 0;

	meta->size = 0;
	meta->id = req->id;
	npo->copy_off = 0;
	npo->copy_gref = req->gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		netbk_gop_frag_copy(vif, skb, npo,
				    virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		netbk_gop_frag_copy(vif, skb, npo,
				    skb_frag_page(&skb_shinfo(skb)->frags[i]),
				    skb_frag_size(&skb_shinfo(skb)->frags[i]),
				    skb_shinfo(skb)->frags[i].page_offset,
				    &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
			   struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}
static void netbk_add_frag_responses(struct xenvif *vif, int status,
				     struct netbk_rx_meta *meta,
				     int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(vif, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}
struct skb_cb_overlay {
	int meta_slots_used;
};
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL, *tmp;
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	int nr_frags;
	int count;
	unsigned long offset;
	struct skb_cb_overlay *sco;

	struct netrx_pending_operations npo = {
		.copy = netbk->grant_copy_op,
		.meta = netbk->meta,
	};

	skb_queue_head_init(&rxq);

	count = 0;

	while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
		vif = netdev_priv(skb->dev);
		nr_frags = skb_shinfo(skb)->nr_frags;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = netbk_gop_skb(skb, &npo);

		count += nr_frags + 1;

		__skb_queue_tail(&rxq, skb);

		/* Filled the batch queue? */
		/* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */
		if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
			break;
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));

	if (!npo.copy_prod)
		return;

	BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy, &netbk->grant_copy_op,
					npo.copy_prod);
	BUG_ON(ret != 0);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		vif = netdev_priv(skb->dev);

		if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = netbk->meta[npo.meta_cons].gso_size;
			resp->id = netbk->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}

		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = netbk_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
					status, offset,
					netbk->meta[npo.meta_cons].size,
					flags);

		if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
			gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		netbk_add_frag_responses(vif, status,
					 netbk->meta + npo.meta_cons + 1,
					 sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);

		xenvif_notify_tx_completion(vif);

		npo.meta_cons += sco->meta_slots_used;
	}

	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
		notify_remote_via_irq(vif->irq);
		list_del_init(&vif->notify_list);
	}

	/* More work to do? */
	if (!skb_queue_empty(&netbk->rx_queue) &&
	    !timer_pending(&netbk->net_timer))
		xen_netbk_kick_thread(netbk);
}
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
	struct xen_netbk *netbk = vif->netbk;

	skb_queue_tail(&netbk->rx_queue, skb);

	xen_netbk_kick_thread(netbk);
}
static void xen_netbk_alarm(unsigned long data)
{
	struct xen_netbk *netbk = (struct xen_netbk *)data;
	xen_netbk_kick_thread(netbk);
}
static int __on_net_schedule_list(struct xenvif *vif)
{
	return !list_empty(&vif->schedule_list);
}
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
	if (likely(__on_net_schedule_list(vif))) {
		list_del_init(&vif->schedule_list);
	}
}
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
	struct xenvif *vif = NULL;

	spin_lock_irq(&netbk->net_schedule_list_lock);
	if (list_empty(&netbk->net_schedule_list))
		goto out;

	vif = list_first_entry(&netbk->net_schedule_list,
			       struct xenvif, schedule_list);

	remove_from_net_schedule_list(vif);
out:
	spin_unlock_irq(&netbk->net_schedule_list_lock);
	return vif;
}
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
	unsigned long flags;
	struct xen_netbk *netbk = vif->netbk;

	if (__on_net_schedule_list(vif))
		goto kick;

	spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
	if (!__on_net_schedule_list(vif) &&
	    likely(xenvif_schedulable(vif))) {
		list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
	}
	spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);

kick:
	if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
	    !list_empty(&netbk->net_schedule_list))
		xen_netbk_kick_thread(netbk);
}
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
	struct xen_netbk *netbk = vif->netbk;
	spin_lock_irq(&netbk->net_schedule_list_lock);
	remove_from_net_schedule_list(vif);
	spin_unlock_irq(&netbk->net_schedule_list_lock);
}
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		xen_netbk_schedule_xenvif(vif);
}
static void tx_add_credit(struct xenvif *vif)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
	max_burst = min(max_burst, 131072UL);
	max_burst = max(max_burst, vif->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = vif->remaining_credit + vif->credit_bytes;
	if (max_credit < vif->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	vif->remaining_credit = min(max_credit, max_burst);
}
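
/*
 * Illustrative example of the wrap check above (not from the original
 * code): on a 32-bit build with remaining_credit = 0xfffff000 and
 * credit_bytes = 0x2000 the sum wraps to 0x1000, which is smaller than
 * remaining_credit, so the credit is clamped to ULONG_MAX rather than
 * silently collapsing to almost nothing.
 */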
static void tx_credit_callback(unsigned long data)
{
	struct xenvif *vif = (struct xenvif *)data;
	tx_add_credit(vif);
	xen_netbk_check_rx_xenvif(vif);
}
static void netbk_tx_err(struct xenvif *vif,
			 struct xen_netif_tx_request *txp, RING_IDX end)
{
	RING_IDX cons = vif->tx.req_cons;

	do {
		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		txp = RING_GET_REQUEST(&vif->tx, cons++);
	} while (1);
	vif->tx.req_cons = cons;
	xen_netbk_check_rx_xenvif(vif);
}
static void netbk_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	xenvif_carrier_off(vif);
}
static int netbk_count_requests(struct xenvif *vif,
				struct xen_netif_tx_request *first,
				RING_IDX first_idx,
				struct xen_netif_tx_request *txp,
				int work_to_do)
{
	RING_IDX cons = vif->tx.req_cons;
	int slots = 0;
	int drop_err = 0;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		if (slots >= work_to_do) {
			netdev_err(vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			netbk_fatal_tx_err(vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= max_skb_slots)) {
			netdev_err(vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, max_skb_slots);
			netbk_fatal_tx_err(vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETIF_NR_SLOTS_MIN is set to the
		 * historical MAX_SKB_FRAGS value 18 to honor the same
		 * behavior as before. Any packet using more than 18
		 * slots but less than max_skb_slots slots is dropped.
		 */
		if (!drop_err && slots >= XEN_NETIF_NR_SLOTS_MIN) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETIF_NR_SLOTS_MIN);
			drop_err = -E2BIG;
		}

		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
		       sizeof(*txp));

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
				   txp->offset, txp->size);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}
	} while ((txp++)->flags & XEN_NETTXF_more_data);

	if (drop_err) {
		netbk_tx_err(vif, first, first_idx + slots);
		return drop_err;
	}

	return slots;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
					 u16 pending_idx)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL|__GFP_COLD);
	if (!page)
		return NULL;
	set_page_ext(page, netbk, pending_idx);
	netbk->mmap_pages[pending_idx] = page;
	return page;
}
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
						  struct xenvif *vif,
						  struct sk_buff *skb,
						  struct xen_netif_tx_request *txp,
						  struct gnttab_copy *gop)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = *((u16 *)skb->data);
	u16 head_idx = 0;
	int slot, start;
	struct page *page;
	pending_ring_idx_t index, start_idx = 0;
	uint16_t dst_offset;
	unsigned int nr_slots;
	struct pending_tx_info *first = NULL;

	/* At this point shinfo->nr_frags is in fact the number of
	 * slots, which can be as large as XEN_NETIF_NR_SLOTS_MIN.
	 */
	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	/* Coalesce tx requests, at this point the packet passed in
	 * should be <= 64K. Any packets larger than 64K have been
	 * handled in netbk_count_requests().
	 */
	for (shinfo->nr_frags = slot = start; slot < nr_slots;
	     shinfo->nr_frags++) {
		struct pending_tx_info *pending_tx_info =
			netbk->pending_tx_info;

		page = alloc_page(GFP_KERNEL|__GFP_COLD);
		if (!page)
			goto err;

		dst_offset = 0;
		first = NULL;
		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
			gop->flags = GNTCOPY_source_gref;

			gop->source.u.ref = txp->gref;
			gop->source.domid = vif->domid;
			gop->source.offset = txp->offset;

			gop->dest.domid = DOMID_SELF;

			gop->dest.offset = dst_offset;
			gop->dest.u.gmfn = virt_to_mfn(page_address(page));

			if (dst_offset + txp->size > PAGE_SIZE) {
				/* This page can only merge a portion
				 * of tx request. Do not increment any
				 * pointer / counter here. The txp
				 * will be dealt with in future
				 * rounds, eventually hitting the
				 * `else` branch.
				 */
				gop->len = PAGE_SIZE - dst_offset;
				txp->offset += gop->len;
				txp->size -= gop->len;
				dst_offset += gop->len; /* quit loop */
			} else {
				/* This tx request can be merged in the page */
				gop->len = txp->size;
				dst_offset += gop->len;

				index = pending_index(netbk->pending_cons++);

				pending_idx = netbk->pending_ring[index];

				memcpy(&pending_tx_info[pending_idx].req, txp,
				       sizeof(*txp));

				pending_tx_info[pending_idx].vif = vif;

				/* Poison these fields, corresponding
				 * fields for head tx req will be set
				 * to correct values after the loop.
				 */
				netbk->mmap_pages[pending_idx] = (void *)(~0UL);
				pending_tx_info[pending_idx].head =
					INVALID_PENDING_RING_IDX;

				if (!first) {
					first = &pending_tx_info[pending_idx];
					start_idx = index;
					head_idx = pending_idx;
				}

				txp++;
				slot++;
			}

			gop++;
		}

		first->req.offset = 0;
		first->req.size = dst_offset;
		first->head = start_idx;
		set_page_ext(page, netbk, head_idx);
		netbk->mmap_pages[head_idx] = page;
		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
	}

	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);

	return gop;
err:
	/* Unwind, freeing all pages and sending error responses. */
	while (shinfo->nr_frags-- > start) {
		xen_netbk_idx_release(netbk,
				frag_get_pending_idx(&frags[shinfo->nr_frags]),
				XEN_NETIF_RSP_ERROR);
	}
	/* The head too, if necessary. */
	if (start)
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	return NULL;
}
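
/*
 * Illustrative walk-through of the coalescing loop above (not from the
 * original code): with three 1500-byte slots remaining, the first two are
 * merged whole into a fresh page at dst_offset 0 and 1500, while the third
 * starts at dst_offset 3000 and only its first 1096 bytes fit, so its txp
 * is trimmed (offset += 1096, size -= 1096) and finished on the next page
 * in a later round. This is the "worst case a tx request consumes
 * 2 gnttab_copy" noted where tx_copy_ops is declared.
 */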
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	struct pending_tx_info *tx_info;
	int nr_frags = shinfo->nr_frags;
	int i, err, start;
	u16 peek; /* peek into next tx request */

	/* Check status of header. */
	err = gop->status;
	if (unlikely(err))
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;
		pending_ring_idx_t head;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
		tx_info = &netbk->pending_tx_info[pending_idx];
		head = tx_info->head;

		/* Check error status: if okay then remember grant handle. */
		do {
			newerr = (++gop)->status;
			if (newerr)
				break;
			peek = netbk->pending_ring[pending_index(++head)];
		} while (!pending_tx_is_head(netbk, peek));

		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx,
						      XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx,
					      XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		txp = &netbk->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset xen_netbk_idx_release */
		get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
	}
}
static int xen_netbk_get_extras(struct xenvif *vif,
				struct xen_netif_extra_info *extras,
				int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = vif->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(vif->dev, "Missing extra info\n");
			netbk_fatal_tx_err(vif);
			return -EBADR;
		}

		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
		       sizeof(extra));
		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			vif->tx.req_cons = ++cons;
			netdev_err(vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			netbk_fatal_tx_err(vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
		vif->tx.req_cons = ++cons;
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int netbk_set_skb_gso(struct xenvif *vif,
			     struct sk_buff *skb,
			     struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		netbk_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		vif->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
				   iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
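
/*
 * Illustrative values for the CHECKSUM_PARTIAL setup above (not from the
 * original code): for a plain TCPv4 frame with a 20-byte IP header, th
 * points at the TCP header, csum_start = th - skb->head and csum_offset =
 * offsetof(struct tcphdr, check) = 16, so the final checksum is later
 * folded into the two bytes at csum_start + csum_offset.
 */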
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = vif->credit_window_start +
		msecs_to_jiffies(vif->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&vif->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		vif->credit_window_start = now;
		tx_add_credit(vif);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > vif->remaining_credit) {
		vif->credit_timeout.data     =
			(unsigned long)vif;
		vif->credit_timeout.function =
			tx_credit_callback;
		mod_timer(&vif->credit_timeout,
			  next_credit);
		vif->credit_window_start = next_credit;

		return true;
	}

	return false;
}
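
/*
 * Illustrative timeline for the credit scheduler above (not from the
 * original code): with credit_bytes = 1000000 and credit_usec = 1000000
 * the vif may send roughly 1 MB per second. Once remaining_credit drops
 * below the next packet's size before the replenish point is reached, the
 * credit_timeout timer is armed for next_credit and the packet stays on
 * the ring until tx_credit_callback() tops the credit back up.
 */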
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
	struct sk_buff *skb;
	int ret;

	while ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
		< MAX_PENDING_REQS) &&
	       !list_empty(&netbk->net_schedule_list)) {
		struct xenvif *vif;
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[max_skb_slots];
		struct page *page;
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		/* Get a netif from the list with work to do. */
		vif = poll_net_schedule_list(netbk);
		/* This can sometimes happen because the test of
		 * list_empty(net_schedule_list) at the top of the
		 * loop is unlocked. Just go back and have another
		 * look.
		 */
		if (!vif)
			continue;

		if (vif->tx.sring->req_prod - vif->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   vif->tx.sring->req_prod, vif->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			netbk_fatal_tx_err(vif);
			continue;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
		if (!work_to_do)
			continue;

		idx = vif->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

		/* Credit-based scheduling. */
		if (txreq.size > vif->remaining_credit &&
		    tx_credit_exceeded(vif, txreq.size))
			continue;

		vif->remaining_credit -= txreq.size;

		work_to_do--;
		vif->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xen_netbk_get_extras(vif, extras,
							  work_to_do);
			idx = vif->tx.req_cons;
			if (unlikely(work_to_do < 0))
				continue;
		}

		ret = netbk_count_requests(vif, &txreq, idx,
					   txfrags, work_to_do);
		if (unlikely(ret < 0))
			continue;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
			netdev_err(vif->dev,
				   "txreq.offset: %x, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (txreq.offset&~PAGE_MASK) + txreq.size);
			netbk_fatal_tx_err(vif);
			continue;
		}

		index = pending_index(netbk->pending_cons);
		pending_idx = netbk->pending_ring[index];

		data_len = (txreq.size > PKT_PROT_LEN &&
			    ret < XEN_NETIF_NR_SLOTS_MIN) ?
			PKT_PROT_LEN : txreq.size;

		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(skb == NULL)) {
			netdev_dbg(vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			netbk_tx_err(vif, &txreq, idx);
			break;
		}

		/* Packets passed to netif_rx() must have some headroom. */
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (netbk_set_skb_gso(vif, skb, gso)) {
				/* Failure in netbk_set_skb_gso is fatal. */
				kfree_skb(skb);
				continue;
			}
		}

		/* XXX could copy straight to head */
		page = xen_netbk_alloc_page(netbk, pending_idx);
		if (!page) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}

		gop->source.u.ref = txreq.gref;
		gop->source.domid = vif->domid;
		gop->source.offset = txreq.offset;

		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
		gop->dest.domid = DOMID_SELF;
		gop->dest.offset = txreq.offset;

		gop->len = txreq.size;
		gop->flags = GNTCOPY_source_gref;

		gop++;

		memcpy(&netbk->pending_tx_info[pending_idx].req,
		       &txreq, sizeof(txreq));
		netbk->pending_tx_info[pending_idx].vif = vif;
		netbk->pending_tx_info[pending_idx].head = index;
		*((u16 *)skb->data) = pending_idx;

		__skb_put(skb, data_len);

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size) {
			skb_shinfo(skb)->nr_frags++;
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
		}

		__skb_queue_tail(&netbk->tx_queue, skb);

		netbk->pending_cons++;

		request_gop = xen_netbk_get_requests(netbk, vif,
						     skb, txfrags, gop);
		if (request_gop == NULL) {
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
		gop = request_gop;

		vif->tx.req_cons = idx;
		xen_netbk_check_rx_xenvif(vif);

		if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
			break;
	}

	return gop - netbk->tx_copy_ops;
}
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
	struct gnttab_copy *gop = netbk->tx_copy_ops;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		struct xenvif *vif;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = *((u16 *)skb->data);
		vif = netbk->pending_tx_info[pending_idx].vif;
		txp = &netbk->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
			netdev_dbg(vif->dev, "netback grant failed.\n");
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		memcpy(skb->data,
		       (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
		       data_len);
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xen_netbk_fill_frags(netbk, skb);

		/*
		 * If the initial fragment was < PKT_PROT_LEN then
		 * pull through some bytes from the other fragments to
		 * increase the linear region to PKT_PROT_LEN bytes.
		 */
		if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
			int target = min_t(int, skb->len, PKT_PROT_LEN);
			__pskb_pull_tail(skb, target - skb_headlen(skb));
		}

		skb->dev      = vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (checksum_setup(vif, skb)) {
			netdev_dbg(vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			kfree_skb(skb);
			continue;
		}

		vif->dev->stats.rx_bytes += skb->len;
		vif->dev->stats.rx_packets++;

		xenvif_receive_skb(vif, skb);
	}
}
/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
	unsigned nr_gops;
	int ret;

	nr_gops = xen_netbk_tx_build_gops(netbk);

	if (nr_gops == 0)
		return;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_copy,
					netbk->tx_copy_ops, nr_gops);
	BUG_ON(ret);

	xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status)
{
	struct xenvif *vif;
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t head;
	u16 peek; /* peek into next tx request */

	BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));

	/* Already complete? */
	if (netbk->mmap_pages[pending_idx] == NULL)
		return;

	pending_tx_info = &netbk->pending_tx_info[pending_idx];

	vif = pending_tx_info->vif;
	head = pending_tx_info->head;

	BUG_ON(!pending_tx_is_head(netbk, head));
	BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);

	do {
		pending_ring_idx_t index;
		pending_ring_idx_t idx = pending_index(head);
		u16 info_idx = netbk->pending_ring[idx];

		pending_tx_info = &netbk->pending_tx_info[info_idx];
		make_tx_response(vif, &pending_tx_info->req, status);

		/* Setting any number other than
		 * INVALID_PENDING_RING_IDX indicates this slot is
		 * starting a new packet / ending a previous packet.
		 */
		pending_tx_info->head = 0;

		index = pending_index(netbk->pending_prod++);
		netbk->pending_ring[index] = netbk->pending_ring[info_idx];

		peek = netbk->pending_ring[pending_index(++head)];

	} while (!pending_tx_is_head(netbk, peek));

	netbk->mmap_pages[pending_idx]->mapping = 0;
	put_page(netbk->mmap_pages[pending_idx]);
	netbk->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset     = offset;
	resp->flags      = flags;
	resp->id         = id;
	resp->status     = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
static inline int rx_work_todo(struct xen_netbk *netbk)
{
	return !skb_queue_empty(&netbk->rx_queue);
}

static inline int tx_work_todo(struct xen_netbk *netbk)
{
	if ((nr_pending_reqs(netbk) + XEN_NETIF_NR_SLOTS_MIN
	     < MAX_PENDING_REQS) &&
	    !list_empty(&netbk->net_schedule_list))
		return 1;

	return 0;
}
static int xen_netbk_kthread(void *data)
{
	struct xen_netbk *netbk = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(netbk->wq,
					 rx_work_todo(netbk) ||
					 tx_work_todo(netbk) ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		if (rx_work_todo(netbk))
			xen_netbk_rx_action(netbk);

		if (tx_work_todo(netbk))
			xen_netbk_tx_action(netbk);
	}

	return 0;
}
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}

int xen_netbk_map_frontend_rings(struct xenvif *vif,
				 grant_ref_t tx_ring_ref,
				 grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	vif->rx_req_cons_peek = 0;

	return 0;

err:
	xen_netbk_unmap_frontend_rings(vif);
	return err;
}
static int __init netback_init(void)
{
	int i;
	int rc = 0;
	int group;

	if (max_skb_slots < XEN_NETIF_NR_SLOTS_MIN) {
		printk(KERN_INFO
		       "xen-netback: max_skb_slots too small (%d), bump it to XEN_NETIF_NR_SLOTS_MIN (%d)\n",
		       max_skb_slots, XEN_NETIF_NR_SLOTS_MIN);
		max_skb_slots = XEN_NETIF_NR_SLOTS_MIN;
	}

	xen_netbk_group_nr = num_online_cpus();
	xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
	if (!xen_netbk)
		return -ENOMEM;

	for (group = 0; group < xen_netbk_group_nr; group++) {
		struct xen_netbk *netbk = &xen_netbk[group];
		skb_queue_head_init(&netbk->rx_queue);
		skb_queue_head_init(&netbk->tx_queue);

		init_timer(&netbk->net_timer);
		netbk->net_timer.data = (unsigned long)netbk;
		netbk->net_timer.function = xen_netbk_alarm;

		netbk->pending_cons = 0;
		netbk->pending_prod = MAX_PENDING_REQS;
		for (i = 0; i < MAX_PENDING_REQS; i++)
			netbk->pending_ring[i] = i;

		init_waitqueue_head(&netbk->wq);
		netbk->task = kthread_create(xen_netbk_kthread,
					     (void *)netbk,
					     "netback/%u", group);

		if (IS_ERR(netbk->task)) {
			printk(KERN_ALERT "kthread_create() fails at netback\n");
			del_timer(&netbk->net_timer);
			rc = PTR_ERR(netbk->task);
			goto failed_init;
		}

		kthread_bind(netbk->task, group);

		INIT_LIST_HEAD(&netbk->net_schedule_list);

		spin_lock_init(&netbk->net_schedule_list_lock);

		atomic_set(&netbk->netfront_count, 0);

		wake_up_process(netbk->task);
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

failed_init:
	while (--group >= 0) {
		struct xen_netbk *netbk = &xen_netbk[group];
		for (i = 0; i < MAX_PENDING_REQS; i++) {
			if (netbk->mmap_pages[i])
				__free_page(netbk->mmap_pages[i]);
		}
		del_timer(&netbk->net_timer);
		kthread_stop(netbk->task);
	}
	vfree(xen_netbk);
	return rc;
}

module_init(netback_init);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");