/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>
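
/*
 * Insert a freshly computed hash into the per-VIF cache. The cache is an
 * RCU list protected by a spinlock for writers; the entry with the lowest
 * sequence number is treated as least-recently-used and is evicted once
 * the cache grows beyond xenvif_hash_cache_size.
 */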
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
			    unsigned int len, u32 val)
{
	struct xenvif_hash_cache_entry *new, *entry, *oldest;
	unsigned long flags;
	bool found;

	new = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!new)
		return;

	memcpy(new->tag, tag, len);
	new->len = len;
	new->val = val;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	found = false;
	oldest = NULL;
	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
				lockdep_is_held(&vif->hash.cache.lock)) {
		/* Make sure we don't add duplicate entries */
		if (entry->len == len &&
		    memcmp(entry->tag, tag, len) == 0)
			found = true;
		if (!oldest || entry->seq < oldest->seq)
			oldest = entry;
	}

	if (!found) {
		new->seq = atomic_inc_return(&vif->hash.cache.seq);
		list_add_rcu(&new->link, &vif->hash.cache.list);

		if (++vif->hash.cache.count > xenvif_hash_cache_size) {
			list_del_rcu(&oldest->link);
			vif->hash.cache.count--;
			kfree_rcu(oldest, rcu);
		}
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

	if (found)
		kfree(new);
}
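
/*
 * Compute a Toeplitz hash over the given data using the current key, and
 * cache the result if the hash cache is enabled.
 */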
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
			   unsigned int len)
{
	u32 val;

	val = xen_netif_toeplitz_hash(vif->hash.key,
				      sizeof(vif->hash.key),
				      data, len);

	if (xenvif_hash_cache_size != 0)
		xenvif_add_hash(vif, data, len, val);

	return val;
}
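
/*
 * Drop every cached hash entry. Called when the hash key is replaced
 * (cached values computed with the old key would be stale) and on
 * interface teardown.
 */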
static void xenvif_flush_hash(struct xenvif *vif)
{
	struct xenvif_hash_cache_entry *entry;
	unsigned long flags;

	if (xenvif_hash_cache_size == 0)
		return;

	spin_lock_irqsave(&vif->hash.cache.lock, flags);

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link,
				lockdep_is_held(&vif->hash.cache.lock)) {
		list_del_rcu(&entry->link);
		vif->hash.cache.count--;
		kfree_rcu(entry, rcu);
	}

	spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}
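
/*
 * Look the tag up in the cache under rcu_read_lock(); on a hit, bump the
 * entry's sequence number so it counts as recently used. On a miss (or
 * when the cache is disabled) fall back to computing a new hash.
 */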
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
			    unsigned int len)
{
	struct xenvif_hash_cache_entry *entry;
	u32 val;
	bool found;

	if (len >= XEN_NETBK_HASH_TAG_SIZE)
		return 0;

	if (xenvif_hash_cache_size == 0)
		return xenvif_new_hash(vif, data, len);

	rcu_read_lock();

	found = false;

	list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
		if (entry->len == len &&
		    memcmp(entry->tag, data, len) == 0) {
			val = entry->val;
			entry->seq = atomic_inc_return(&vif->hash.cache.seq);
			found = true;
			break;
		}
	}

	rcu_read_unlock();

	if (!found)
		val = xenvif_new_hash(vif, data, len);

	return val;
}
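
/*
 * Compute (or look up) the receive hash for an skb and record it with
 * __skb_set_sw_hash(). The hash input is the flow's source and
 * destination addresses, plus the TCP ports when an L4 hash type is
 * enabled and the packet carries an unfragmented TCP header.
 */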
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
	struct flow_keys flow;
	u32 hash = 0;
	enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
	u32 flags = vif->hash.flags;
	bool has_tcp_hdr;

	/* Quick rejection test: If the network protocol doesn't
	 * correspond to any enabled hash type then there's no point
	 * in parsing the packet header.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV4))
			break;

		goto done;

	case htons(ETH_P_IPV6):
		if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
			     XEN_NETIF_CTRL_HASH_TYPE_IPV6))
			break;

		goto done;

	default:
		goto done;
	}

	memset(&flow, 0, sizeof(flow));
	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		goto done;

	has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
		      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
			u8 data[12];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
			memcpy(&data[8], &flow.ports.src, 2);
			memcpy(&data[10], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
			u8 data[8];

			memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
			memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;

	case htons(ETH_P_IPV6):
		if (has_tcp_hdr &&
		    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
			u8 data[36];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
			memcpy(&data[32], &flow.ports.src, 2);
			memcpy(&data[34], &flow.ports.dst, 2);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L4;
		} else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
			u8 data[32];

			memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
			memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

			hash = xenvif_find_hash(vif, data, sizeof(data));
			type = PKT_HASH_TYPE_L3;
		}

		break;
	}

done:
	if (type == PKT_HASH_TYPE_NONE)
		skb_clear_hash(skb);
	else
		__skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}
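
/*
 * Control-plane entry points, invoked for the corresponding control-ring
 * requests elsewhere in the driver. Each returns a
 * XEN_NETIF_CTRL_STATUS_* code.
 */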
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
	switch (alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		break;

	default:
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	vif->hash.alg = alg;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}

u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

	*flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
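
/*
 * Only the four supported hash type bits may be set, and only once an
 * algorithm other than NONE has been selected.
 */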
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
	if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
		      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.flags = flags;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
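
/*
 * Copy a new Toeplitz key from the frontend with a single grant copy
 * operation, zero any tail beyond the supplied length, and flush the
 * hash cache since its cached values are now stale.
 */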
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
	u8 *key = vif->hash.key;
	struct gnttab_copy copy_op = {
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.u.gmfn = virt_to_gfn(key),
		.dest.domid = DOMID_SELF,
		.dest.offset = xen_offset_in_page(key),
		.len = len,
		.flags = GNTCOPY_source_gref
	};

	if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	if (copy_op.len != 0) {
		gnttab_batch_copy(&copy_op, 1);

		if (copy_op.status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	/* Clear any remaining key octets */
	if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
		memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

	xenvif_flush_hash(vif);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
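
/* Resize the mapping table and zero the currently selected mapping. */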
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
	if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.size = size;
	memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
	       sizeof(u32) * size);

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
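
/*
 * Grant-copy a run of mapping entries from the frontend into the
 * inactive mapping table, splitting the copy in two when the destination
 * straddles a page boundary. Every entry is validated against the number
 * of queues before the active/inactive selector is flipped, so a bad
 * update never becomes visible to the data path.
 */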
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off)
{
	u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
	unsigned int nr = 1;
	struct gnttab_copy copy_op[2] = {{
		.source.u.ref = gref,
		.source.domid = vif->domid,
		.dest.domid = DOMID_SELF,
		.len = len * sizeof(*mapping),
		.flags = GNTCOPY_source_gref
	}};

	if ((off + len < off) || (off + len > vif->hash.size) ||
	    len > XEN_PAGE_SIZE / sizeof(*mapping))
		return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
	copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
	if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
		copy_op[1] = copy_op[0];
		copy_op[1].source.offset = XEN_PAGE_SIZE -
					   copy_op[0].dest.offset;
		copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
		copy_op[1].dest.offset = 0;
		copy_op[1].len = copy_op[0].len -
				 copy_op[1].source.offset;
		copy_op[0].len = copy_op[1].source.offset;
		nr = 2;
	}

	memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
	       vif->hash.size * sizeof(*mapping));

	if (copy_op[0].len != 0) {
		gnttab_batch_copy(copy_op, nr);

		if (copy_op[0].status != GNTST_okay ||
		    copy_op[nr - 1].status != GNTST_okay)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
	}

	while (len-- != 0)
		if (mapping[off++] >= vif->num_queues)
			return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

	vif->hash.mapping_sel = !vif->hash.mapping_sel;

	return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
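
/*
 * debugfs support: dump the configured algorithm, enabled hash types,
 * key bytes and queue mapping in human-readable form.
 */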
#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		fallthrough;
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];

		seq_puts(m, "\nHash Mapping:\n");

		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */
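
/*
 * Set up the (empty) hash cache when the interface is created, and flush
 * it again on teardown.
 */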
void xenvif_init_hash(struct xenvif *vif)
{
	if (xenvif_hash_cache_size == 0)
		return;

	BUG_ON(vif->hash.cache.count);

	spin_lock_init(&vif->hash.cache.lock);
	INIT_LIST_HEAD(&vif->hash.cache.list);
}

void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}