/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	unsigned int extra_count;
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback, desc contains the pending_idx, which is
	 * also an index into the pending_tx_info array. It is initialized in
	 * xenvif_alloc and never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * callback_struct in this array of struct pending_tx_info's, then ctx
	 * to the next, or NULL if there are no more slots for this skb.
	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
	 * to this field.
	 */
	struct ubuf_info callback_struct;
};
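
/*
 * Illustrative sketch only (the real walker is xenvif_zerocopy_callback in
 * netback.c; this helper name is hypothetical): visit each pending slot of
 * an skb by following the desc/ctx chain described in the comment above.
 */
static inline void example_walk_pending_slots(struct sk_buff *skb)
{
	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;

	while (ubuf) {
		/* desc holds the pending_idx set up in xenvif_alloc */
		u16 pending_idx = (u16)ubuf->desc;

		(void)pending_idx; /* a real caller would release this slot */
		ubuf = (struct ubuf_info *)ubuf->ctx;
	}
}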

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
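
/* For example, with the usual 4 KiB Xen page size this evaluates to
 * 65536 / 4096 + 1 = 17 slots for a maximally-sized 64 KiB packet.
 */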

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Now this value is defined
 * to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
 * all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
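
/*
 * Illustrative sketch (hypothetical helper; the real names are composed in
 * interface.c when a queue is brought up): how the two sizes above bound
 * the generated names.
 */
static inline void example_format_queue_names(char *qname, char *irqname,
					      const char *devname,
					      unsigned int queue_id)
{
	snprintf(qname, QUEUE_NAME_SIZE, "%s-q%u", devname, queue_id); /* e.g. "vif1.0-q0" */
	snprintf(irqname, IRQ_NAME_SIZE, "%s-tx", qname); /* e.g. "vif1.0-q0-tx" */
}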

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	u64 rx_bytes;
	u64 rx_packets;
	u64 tx_bytes;
	u64 tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

#define COPY_BATCH_SIZE 64

struct xenvif_copy_state {
	struct gnttab_copy op[COPY_BATCH_SIZE];
	RING_IDX idx[COPY_BATCH_SIZE];
	unsigned int num;
	struct sk_buff_head *completed;
};

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* This prevents zerocopy callbacks from racing over dealloc_ring */
	spinlock_t callback_lock;
	/* This prevents the dealloc thread and NAPI instance from racing over
	 * response creation and pending_ring in xenvif_idx_release. In
	 * xenvif_tx_err it only protects response creation.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	bool stalled;

	struct xenvif_copy_state rx_copy;

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;

	/* Statistics */
	struct xenvif_stats stats;
};
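
/*
 * Illustrative sketch of the transmit-shaping fields above (a hypothetical,
 * simplified version of the credit logic in netback.c): spend credit per
 * packet; the credit_timeout later tops remaining_credit back up to
 * credit_bytes once the credit_usec window has elapsed.
 */
static inline bool example_consume_credit(struct xenvif_queue *queue,
					  unsigned int bytes)
{
	if (bytes > queue->remaining_credit)
		return false; /* stalled until the timer replenishes credit */

	queue->remaining_credit -= bytes;
	return true;
}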

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t          domid;
	unsigned int     handle;

	u8               fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int     fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* Is this interface disabled? True when backend discovers
	 * frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
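
/*
 * Illustrative usage of the accessor above (hypothetical helper): stamp an
 * expiry time on an skb as it is queued for the guest RX path.
 */
static inline void example_stamp_rx_expiry(struct sk_buff *skb,
					   unsigned long timeout_jiffies)
{
	XENVIF_RX_CB(skb)->expires = jiffies + timeout_jiffies;
}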

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}

void xenvif_tx_credit_callback(struct timer_list *t);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);
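
/*
 * Intended ordering of the entry points above, as a sketch (error handling
 * and per-queue iteration omitted):
 *
 *	vif = xenvif_alloc(parent, domid, handle);
 *	xenvif_init_queue(queue);
 *	xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
 *			    tx_evtchn, rx_evtchn);
 *	...
 *	xenvif_disconnect_data(vif);
 *	xenvif_deinit_queue(queue);
 *	xenvif_free(vif);
 */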

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

void xenvif_rx_action(struct xenvif_queue *queue);
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}
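
/* Worked example: pending_prod - pending_cons is the number of free entries
 * in pending_ring, so with the usual 4 KiB page size (MAX_PENDING_REQS == 256)
 * and 16 free entries, nr_pending_reqs() == 256 - 16 == 240 requests in flight.
 */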

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */