2 * Copyright (c) 2016 Citrix Systems Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation; or, when distributed
7 * separately from the Linux kernel or incorporated into other
8 * software packages, subject to the following license:
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this source file (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use, copy, modify,
13 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
14 * and to permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 #define XEN_NETIF_DEFINE_TOEPLITZ
32 #include <linux/vmalloc.h>
33 #include <linux/rculist.h>
35 static void xenvif_add_hash(struct xenvif
*vif
, const u8
*tag
,
36 unsigned int len
, u32 val
)
38 struct xenvif_hash_cache_entry
*new, *entry
, *oldest
;
42 new = kmalloc(sizeof(*entry
), GFP_ATOMIC
);
46 memcpy(new->tag
, tag
, len
);
50 spin_lock_irqsave(&vif
->hash
.cache
.lock
, flags
);
54 list_for_each_entry_rcu(entry
, &vif
->hash
.cache
.list
, link
) {
55 /* Make sure we don't add duplicate entries */
56 if (entry
->len
== len
&&
57 memcmp(entry
->tag
, tag
, len
) == 0)
59 if (!oldest
|| entry
->seq
< oldest
->seq
)
64 new->seq
= atomic_inc_return(&vif
->hash
.cache
.seq
);
65 list_add_rcu(&new->link
, &vif
->hash
.cache
.list
);
67 if (++vif
->hash
.cache
.count
> xenvif_hash_cache_size
) {
68 list_del_rcu(&oldest
->link
);
69 vif
->hash
.cache
.count
--;
70 kfree_rcu(oldest
, rcu
);
74 spin_unlock_irqrestore(&vif
->hash
.cache
.lock
, flags
);
80 static u32
xenvif_new_hash(struct xenvif
*vif
, const u8
*data
,
85 val
= xen_netif_toeplitz_hash(vif
->hash
.key
,
86 sizeof(vif
->hash
.key
),
89 if (xenvif_hash_cache_size
!= 0)
90 xenvif_add_hash(vif
, data
, len
, val
);
95 static void xenvif_flush_hash(struct xenvif
*vif
)
97 struct xenvif_hash_cache_entry
*entry
;
100 if (xenvif_hash_cache_size
== 0)
103 spin_lock_irqsave(&vif
->hash
.cache
.lock
, flags
);
105 list_for_each_entry_rcu(entry
, &vif
->hash
.cache
.list
, link
) {
106 list_del_rcu(&entry
->link
);
107 vif
->hash
.cache
.count
--;
108 kfree_rcu(entry
, rcu
);
111 spin_unlock_irqrestore(&vif
->hash
.cache
.lock
, flags
);
114 static u32
xenvif_find_hash(struct xenvif
*vif
, const u8
*data
,
117 struct xenvif_hash_cache_entry
*entry
;
121 if (len
>= XEN_NETBK_HASH_TAG_SIZE
)
124 if (xenvif_hash_cache_size
== 0)
125 return xenvif_new_hash(vif
, data
, len
);
131 list_for_each_entry_rcu(entry
, &vif
->hash
.cache
.list
, link
) {
132 if (entry
->len
== len
&&
133 memcmp(entry
->tag
, data
, len
) == 0) {
135 entry
->seq
= atomic_inc_return(&vif
->hash
.cache
.seq
);
144 val
= xenvif_new_hash(vif
, data
, len
);
149 void xenvif_set_skb_hash(struct xenvif
*vif
, struct sk_buff
*skb
)
151 struct flow_keys flow
;
153 enum pkt_hash_types type
= PKT_HASH_TYPE_NONE
;
154 u32 flags
= vif
->hash
.flags
;
157 /* Quick rejection test: If the network protocol doesn't
158 * correspond to any enabled hash type then there's no point
159 * in parsing the packet header.
161 switch (skb
->protocol
) {
162 case htons(ETH_P_IP
):
163 if (flags
& (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP
|
164 XEN_NETIF_CTRL_HASH_TYPE_IPV4
))
169 case htons(ETH_P_IPV6
):
170 if (flags
& (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP
|
171 XEN_NETIF_CTRL_HASH_TYPE_IPV6
))
180 memset(&flow
, 0, sizeof(flow
));
181 if (!skb_flow_dissect_flow_keys(skb
, &flow
, 0))
184 has_tcp_hdr
= (flow
.basic
.ip_proto
== IPPROTO_TCP
) &&
185 !(flow
.control
.flags
& FLOW_DIS_IS_FRAGMENT
);
187 switch (skb
->protocol
) {
188 case htons(ETH_P_IP
):
190 (flags
& XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP
)) {
193 memcpy(&data
[0], &flow
.addrs
.v4addrs
.src
, 4);
194 memcpy(&data
[4], &flow
.addrs
.v4addrs
.dst
, 4);
195 memcpy(&data
[8], &flow
.ports
.src
, 2);
196 memcpy(&data
[10], &flow
.ports
.dst
, 2);
198 hash
= xenvif_find_hash(vif
, data
, sizeof(data
));
199 type
= PKT_HASH_TYPE_L4
;
200 } else if (flags
& XEN_NETIF_CTRL_HASH_TYPE_IPV4
) {
203 memcpy(&data
[0], &flow
.addrs
.v4addrs
.src
, 4);
204 memcpy(&data
[4], &flow
.addrs
.v4addrs
.dst
, 4);
206 hash
= xenvif_find_hash(vif
, data
, sizeof(data
));
207 type
= PKT_HASH_TYPE_L3
;
212 case htons(ETH_P_IPV6
):
214 (flags
& XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP
)) {
217 memcpy(&data
[0], &flow
.addrs
.v6addrs
.src
, 16);
218 memcpy(&data
[16], &flow
.addrs
.v6addrs
.dst
, 16);
219 memcpy(&data
[32], &flow
.ports
.src
, 2);
220 memcpy(&data
[34], &flow
.ports
.dst
, 2);
222 hash
= xenvif_find_hash(vif
, data
, sizeof(data
));
223 type
= PKT_HASH_TYPE_L4
;
224 } else if (flags
& XEN_NETIF_CTRL_HASH_TYPE_IPV6
) {
227 memcpy(&data
[0], &flow
.addrs
.v6addrs
.src
, 16);
228 memcpy(&data
[16], &flow
.addrs
.v6addrs
.dst
, 16);
230 hash
= xenvif_find_hash(vif
, data
, sizeof(data
));
231 type
= PKT_HASH_TYPE_L3
;
238 if (type
== PKT_HASH_TYPE_NONE
)
241 __skb_set_sw_hash(skb
, hash
, type
== PKT_HASH_TYPE_L4
);
244 u32
xenvif_set_hash_alg(struct xenvif
*vif
, u32 alg
)
247 case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE
:
248 case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ
:
252 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
257 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
260 u32
xenvif_get_hash_flags(struct xenvif
*vif
, u32
*flags
)
262 if (vif
->hash
.alg
== XEN_NETIF_CTRL_HASH_ALGORITHM_NONE
)
263 return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED
;
265 *flags
= XEN_NETIF_CTRL_HASH_TYPE_IPV4
|
266 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP
|
267 XEN_NETIF_CTRL_HASH_TYPE_IPV6
|
268 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP
;
270 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
273 u32
xenvif_set_hash_flags(struct xenvif
*vif
, u32 flags
)
275 if (flags
& ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4
|
276 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP
|
277 XEN_NETIF_CTRL_HASH_TYPE_IPV6
|
278 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP
))
279 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
281 if (vif
->hash
.alg
== XEN_NETIF_CTRL_HASH_ALGORITHM_NONE
)
282 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
284 vif
->hash
.flags
= flags
;
286 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
289 u32
xenvif_set_hash_key(struct xenvif
*vif
, u32 gref
, u32 len
)
291 u8
*key
= vif
->hash
.key
;
292 struct gnttab_copy copy_op
= {
293 .source
.u
.ref
= gref
,
294 .source
.domid
= vif
->domid
,
295 .dest
.u
.gmfn
= virt_to_gfn(key
),
296 .dest
.domid
= DOMID_SELF
,
297 .dest
.offset
= xen_offset_in_page(key
),
299 .flags
= GNTCOPY_source_gref
302 if (len
> XEN_NETBK_MAX_HASH_KEY_SIZE
)
303 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
305 if (copy_op
.len
!= 0) {
306 gnttab_batch_copy(©_op
, 1);
308 if (copy_op
.status
!= GNTST_okay
)
309 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
312 /* Clear any remaining key octets */
313 if (len
< XEN_NETBK_MAX_HASH_KEY_SIZE
)
314 memset(key
+ len
, 0, XEN_NETBK_MAX_HASH_KEY_SIZE
- len
);
316 xenvif_flush_hash(vif
);
318 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
321 u32
xenvif_set_hash_mapping_size(struct xenvif
*vif
, u32 size
)
323 if (size
> XEN_NETBK_MAX_HASH_MAPPING_SIZE
)
324 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
326 vif
->hash
.size
= size
;
327 memset(vif
->hash
.mapping
, 0, sizeof(u32
) * size
);
329 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
332 u32
xenvif_set_hash_mapping(struct xenvif
*vif
, u32 gref
, u32 len
,
335 u32
*mapping
= &vif
->hash
.mapping
[off
];
336 struct gnttab_copy copy_op
= {
337 .source
.u
.ref
= gref
,
338 .source
.domid
= vif
->domid
,
339 .dest
.u
.gmfn
= virt_to_gfn(mapping
),
340 .dest
.domid
= DOMID_SELF
,
341 .dest
.offset
= xen_offset_in_page(mapping
),
342 .len
= len
* sizeof(u32
),
343 .flags
= GNTCOPY_source_gref
346 if ((off
+ len
> vif
->hash
.size
) || copy_op
.len
> XEN_PAGE_SIZE
)
347 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
350 if (mapping
[off
++] >= vif
->num_queues
)
351 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
353 if (copy_op
.len
!= 0) {
354 gnttab_batch_copy(©_op
, 1);
356 if (copy_op
.status
!= GNTST_okay
)
357 return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER
;
360 return XEN_NETIF_CTRL_STATUS_SUCCESS
;
#ifdef CONFIG_DEBUG_FS
/* debugfs: dump the hash algorithm, enabled hash types, key bytes and
 * queue mapping table for @vif into seq_file @m. Prints nothing beyond
 * the algorithm line when hashing is disabled.
 */
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
{
	unsigned int i;

	switch (vif->hash.alg) {
	case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
		seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
		break;

	case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
		seq_puts(m, "Hash Algorithm: NONE\n");
		/* FALLTHRU */
	default:
		return;
	}

	if (vif->hash.flags) {
		seq_puts(m, "\nHash Flags:\n");

		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
			seq_puts(m, "- IPv4\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
			seq_puts(m, "- IPv4 + TCP\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
			seq_puts(m, "- IPv6\n");
		if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
			seq_puts(m, "- IPv6 + TCP\n");
	}

	seq_puts(m, "\nHash Key:\n");

	/* Eight key octets per output line */
	for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
		unsigned int j, n;

		n = 8;
		if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
			n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;

		seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);

		for (j = 0; j < n; j++, i++)
			seq_printf(m, "%02x ", vif->hash.key[i]);

		seq_puts(m, "\n");
	}

	if (vif->hash.size != 0) {
		seq_puts(m, "\nHash Mapping:\n");

		/* Eight mapping entries per output line */
		for (i = 0; i < vif->hash.size; ) {
			unsigned int j, n;

			n = 8;
			if (i + n >= vif->hash.size)
				n = vif->hash.size - i;

			seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);

			for (j = 0; j < n; j++, i++)
				seq_printf(m, "%4u ", vif->hash.mapping[i]);

			seq_puts(m, "\n");
		}
	}
}
#endif /* CONFIG_DEBUG_FS */
431 void xenvif_init_hash(struct xenvif
*vif
)
433 if (xenvif_hash_cache_size
== 0)
436 spin_lock_init(&vif
->hash
.cache
.lock
);
437 INIT_LIST_HEAD(&vif
->hash
.cache
.list
);
/* Tear down the hash state by releasing all cached entries. */
void xenvif_deinit_hash(struct xenvif *vif)
{
	xenvif_flush_hash(vif);
}