/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>
#include <linux/skbuff_ref.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>
/* Provide an option to disable split event channels at load time as
 * event channels are limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);
#define MAX_QUEUES_DEFAULT 8
unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");
/*
 * This is the maximum slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128
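/* For reference: a worst case Ethernet header (14 bytes, 18 with a VLAN tag)
 * plus an IPv6 header (40 bytes) plus a TCP header with a full option block
 * (60 bytes) totals 118 bytes, which still fits in the 128-byte copy, so the
 * protocol headers normally end up in the linear area.
 */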
/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
/* The module parameter tells that we have to put data
 * for xen-netfront with the XDP_PACKET_HEADROOM offset
 * needed for XDP processing.
 */
bool provides_xdp_headroom = true;
module_param(provides_xdp_headroom, bool, 0644);
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     const struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 status);
static void push_tx_responses(struct xenvif_queue *queue);

static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline int tx_work_todo(struct xenvif_queue *queue);
static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}
#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);

	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}
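/* Illustrative sketch (not part of the driver): ubuf_to_queue() recovers the
 * queue in two steps: container_of() turns the embedded callback_struct
 * pointer back into its pending_tx_info slot, and stepping back pending_idx
 * elements lands on pending_tx_info[0], whose enclosing structure is the
 * queue. The same pattern on a hypothetical, self-contained pair of types
 * (demo_slot/demo_queue are made-up names):
 */
struct demo_slot { int payload; };
struct demo_queue { struct demo_slot slots[4]; };

static inline struct demo_queue *demo_slot_to_queue(struct demo_slot *slot,
						    unsigned int idx)
{
	/* slot points at slots[idx]; slot - idx points at slots[0]. */
	return container_of(slot - idx, struct demo_queue, slots[0]);
}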
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)skb_frag_off(frag);
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	skb_frag_off_set(frag, pending_idx);
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
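/* The mask in pending_index() relies on MAX_PENDING_REQS being a power of
 * two, so that i & (MAX_PENDING_REQS - 1) equals i % MAX_PENDING_REQS.
 * A compile-time guard documenting that assumption could look like this
 * (hypothetical addition, not in the original source):
 */
static_assert(!(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1)),
	      "MAX_PENDING_REQS must be a power of two");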
void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
	else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
				     &queue->eoi_pending) &
		 (NETBK_TX_EOI | NETBK_COMMON_EOI))
		xen_irq_lateeoi(queue->tx_irq, 0);
}
static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
	queue->rate_limited = false;
}
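/* Illustrative sketch (not part of the driver): the wrap check above is a
 * saturating add. For example, with remaining_credit == ULONG_MAX - 100 and
 * credit_bytes == 200, the sum wraps around to 99, which is smaller than the
 * old value, so the credit is clamped to ULONG_MAX instead of shrinking.
 * A generic helper with the same behaviour (hypothetical name):
 */
static inline unsigned long demo_saturating_add(unsigned long a,
						unsigned long b)
{
	unsigned long sum = a + b;

	return sum < a ? ULONG_MAX : sum;
}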
void xenvif_tx_credit_callback(struct timer_list *t)
{
	struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);

	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}
static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;

	do {
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}
static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->num_queues)
		xenvif_kick_thread(&vif->queues[0]);
}
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}
struct xenvif_tx_cb {
	u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
	u8 copy_count;
	u32 split_mask;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
#define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
#define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
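/* The control-block layout above lives in skb->cb, which is only 48 bytes;
 * xenvif_alloc_skb() guards this with a BUILD_BUG_ON() so that growing
 * struct xenvif_tx_cb past the cb area fails at compile time.
 */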
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}
static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);

	BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
static void xenvif_get_requests(struct xenvif_queue *queue,
				struct sk_buff *skb,
				struct xen_netif_tx_request *first,
				struct xen_netif_tx_request *txfrags,
				unsigned *copy_ops,
				unsigned *map_ops,
				unsigned int frag_overflow,
				struct sk_buff *nskb,
				unsigned int extra_count,
				unsigned int data_len)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx;
	pending_ring_idx_t index;
	unsigned int nr_slots;
	struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
	struct xen_netif_tx_request *txp = first;

	nr_slots = shinfo->nr_frags + frag_overflow + 1;

	copy_count(skb) = 0;
	XENVIF_TX_CB(skb)->split_mask = 0;

	/* Create copy ops for exactly data_len bytes into the skb head. */
	__skb_put(skb, data_len);
	while (data_len > 0) {
		int amount = data_len > txp->size ? txp->size : data_len;
		bool split = false;

		cop->source.u.ref = txp->gref;
		cop->source.domid = queue->vif->domid;
		cop->source.offset = txp->offset;

		cop->dest.domid = DOMID_SELF;
		cop->dest.offset = (offset_in_page(skb->data +
						   skb_headlen(skb) -
						   data_len)) & ~XEN_PAGE_MASK;
		cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
					       - data_len);

		/* Don't cross local page boundary! */
		if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
			amount = XEN_PAGE_SIZE - cop->dest.offset;
			XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
			split = true;
		}

		cop->len = amount;
		cop->flags = GNTCOPY_source_gref;

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];
		callback_param(queue, pending_idx).ctx = NULL;
		copy_pending_idx(skb, copy_count(skb)) = pending_idx;
		if (!split)
			copy_count(skb)++;

		cop++;
		data_len -= amount;

		if (amount == txp->size) {
			/* The copy op covered the full tx_request */

			memcpy(&queue->pending_tx_info[pending_idx].req,
			       txp, sizeof(*txp));
			queue->pending_tx_info[pending_idx].extra_count =
				(txp == first) ? extra_count : 0;

			if (txp == first)
				txp = txfrags;
			else
				txp++;
			queue->pending_cons++;
			nr_slots--;
		} else {
			/* The copy op partially covered the tx_request.
			 * The remainder will be mapped or copied in the next
			 * iteration.
			 */
			txp->offset += amount;
			txp->size -= amount;
		}
	}

	for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
	     nr_slots--) {
		if (unlikely(!txp->size)) {
			make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
			++txp;
			continue;
		}

		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp,
					txp == first ? extra_count : 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
		++shinfo->nr_frags;
		++gop;

		if (txp == first)
			txp = txfrags;
		else
			txp++;
	}

	if (nr_slots > 0) {

		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
			if (unlikely(!txp->size)) {
				make_tx_response(queue, txp, 0,
						 XEN_NETIF_RSP_OKAY);
				continue;
			}

			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
			++shinfo->nr_frags;
			++gop;
		}

		if (shinfo->nr_frags) {
			skb_shinfo(skb)->frag_list = nskb;
			nskb = NULL;
		}
	}

	if (nskb) {
		/* A frag_list skb was allocated but it is no longer needed
		 * because enough slots were converted to copy ops above or some
		 * were empty.
		 */
		kfree_skb(nskb);
	}

	(*copy_ops) = cop - queue->tx_copy_ops;
	(*map_ops) = gop - queue->tx_map_ops;
}
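/* A note on the head-copy loop above: when a copy into the skb head would
 * cross a local XEN_PAGE_SIZE boundary, it is split into two gnttab_copy
 * operations that share one pending index, and the corresponding bit in
 * split_mask records this so xenvif_tx_check_gop() can combine the status
 * of both operations.
 */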
static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) ==
				    copy_pending_idx(skb, copy_count(skb) - 1);
	int i, err = 0;

	for (i = 0; i < copy_count(skb); i++) {
		int newerr;

		/* Check status of header. */
		pending_idx = copy_pending_idx(skb, i);

		newerr = (*gopp_copy)->status;

		/* Split copies need to be handled together. */
		if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
			(*gopp_copy)++;
			if (!newerr)
				newerr = (*gopp_copy)->status;
		}
		if (likely(!newerr)) {
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
		} else {
			err = newerr;
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
					   (*gopp_copy)->status,
					   pending_idx,
					   (*gopp_copy)->source.u.ref);
			/* The first frag might still have this slot mapped */
			if (i < copy_count(skb) - 1 || !sharedslot)
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_ERROR);
		}
		(*gopp_copy)++;
	}

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && !first_shinfo && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = shinfo;
		shinfo = skb_shinfo(shinfo->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous*/
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}
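/* The chaining above links every pending slot of the skb through the ctx
 * pointers of their callback structures, with destructor_arg pointing at the
 * head of the list; xenvif_zerocopy_callback() walks this list to queue each
 * pending index for deallocation in one pass.
 */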
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}
static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}
static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout)) {
		queue->rate_limited = true;
		return true;
	}

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;
		queue->rate_limited = true;

		return true;
	}

	return false;
}
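/* Worked example (illustrative, not from the original source): with
 * credit_bytes = 1000000 and credit_usec = 1000000 the guest may transmit
 * roughly one megabyte per one-second window. A request that exceeds
 * remaining_credit arms credit_timeout for the end of the current window;
 * xenvif_tx_credit_callback() then refills the credit and re-schedules NAPI
 * so the deferred packet is retried.
 */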
/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		unsigned int extra_count;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			continue;
		}

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);

		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
				   txreq.offset, txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
			data_len = txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				skb_shinfo(skb)->nr_frags = 0;
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
				    map_ops, frag_overflow, nskb, extra_count,
				    data_len);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if ((*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)) ||
		    (*map_ops >= ARRAY_SIZE(queue->tx_map_ops)))
			break;
	}
}
/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;

			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(skb_frag_page(&frags[j]));
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		skb_frag_fill_page_desc(&frags[i], page, 0, len);
	}

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->ops->complete(NULL, uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;

		pending_idx = copy_pending_idx(skb, 0);
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

			xenvif_skb_zerocopy_prepare(queue, nskb);
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
			/* Copied all the bits from the frag list -- free it. */
			skb_frag_list_init(skb);
			kfree_skb(nskb);
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss, hdrlen;

			/* GSO implies having the L4 header. */
			WARN_ON_ONCE(!skb_transport_header_was_set(skb));
			if (unlikely(!skb_transport_header_was_set(skb))) {
				kfree_skb(skb);
				continue;
			}

			mss = skb_shinfo(skb)->gso_size;
			hdrlen = skb_tcp_all_headers(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
static void xenvif_zerocopy_callback(struct sk_buff *skb,
				     struct ubuf_info *ubuf_base,
				     bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

const struct ubuf_info_ops xenvif_ubuf_ops = {
	.complete = xenvif_zerocopy_callback,
};
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops = 0, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		if (ret) {
			unsigned int i;

			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
				   nr_mops, ret);
			for (i = 0; i < nr_mops; ++i)
				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
					     GNTST_okay);
		}
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
static void _make_tx_response(struct xenvif_queue *queue,
			      const struct xen_netif_tx_request *txp,
			      unsigned int extra_count,
			      s8 status)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = status;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	_make_tx_response(queue, &pending_tx_info->req,
			  pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void make_tx_response(struct xenvif_queue *queue,
			     const struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 status)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->response_lock, flags);

	_make_tx_response(queue, txp, extra_count, status);
	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}
static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	rsp_prod = READ_ONCE(txs->rsp_prod);
	req_prod = READ_ONCE(txs->req_prod);

	BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
		goto err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	rsp_prod = READ_ONCE(rxs->rsp_prod);
	req_prod = READ_ONCE(rxs->req_prod);

	BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
		goto err;

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}
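/* The req_prod/rsp_prod sanity checks above treat the frontend-provided
 * shared ring as untrusted: an unconsumed-request count larger than the ring
 * size can only come from a buggy or malicious frontend, so the mapping is
 * torn down instead of attaching to it.
 */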
static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining*/
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}
*vif
,
1644 const struct xen_netif_ctrl_request
*req
)
1646 u32 status
= XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED
;
1649 switch (req
->type
) {
1650 case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM
:
1651 status
= xenvif_set_hash_alg(vif
, req
->data
[0]);
1654 case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS
:
1655 status
= xenvif_get_hash_flags(vif
, &data
);
1658 case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS
:
1659 status
= xenvif_set_hash_flags(vif
, req
->data
[0]);
1662 case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY
:
1663 status
= xenvif_set_hash_key(vif
, req
->data
[0],
1667 case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE
:
1668 status
= XEN_NETIF_CTRL_STATUS_SUCCESS
;
1669 data
= XEN_NETBK_MAX_HASH_MAPPING_SIZE
;
1672 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE
:
1673 status
= xenvif_set_hash_mapping_size(vif
,
1677 case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING
:
1678 status
= xenvif_set_hash_mapping(vif
, req
->data
[0],
1687 make_ctrl_response(vif
, req
, status
, data
);
1688 push_ctrl_response(vif
);
static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them. */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}
static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return true;

	return false;
}
xenvif_ctrl_irq_fn(int irq
, void *data
)
1729 struct xenvif
*vif
= data
;
1730 unsigned int eoi_flag
= XEN_EOI_FLAG_SPURIOUS
;
1732 while (xenvif_ctrl_work_todo(vif
)) {
1733 xenvif_ctrl_action(vif
);
1737 xen_irq_lateeoi(irq
, eoi_flag
);
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs but max. 8 if user has not
	 * specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);
MODULE_DESCRIPTION("Xen backend network device module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");