/*
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define XEN_NETIF_DEFINE_TOEPLITZ

#include "common.h"
#include <linux/vmalloc.h>
#include <linux/rculist.h>
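
/* RCU callback: free a cache entry once no reader can still be
 * referencing it.
 */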
static void xenvif_del_hash(struct rcu_head *rcu)
{
        struct xenvif_hash_cache_entry *entry;

        entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);

        kfree(entry);
}
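
/* Add a newly calculated hash value to the cache. A duplicate tag is
 * discarded, and when the cache grows beyond xenvif_hash_cache_size the
 * entry with the lowest sequence number (the least recently used) is
 * unlinked and freed after a grace period.
 */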
static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
                            unsigned int len, u32 val)
{
        struct xenvif_hash_cache_entry *new, *entry, *oldest;
        unsigned long flags;
        bool found;

        new = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!new)
                return;

        memcpy(new->tag, tag, len);
        new->len = len;
        new->val = val;

        spin_lock_irqsave(&vif->hash.cache.lock, flags);

        found = false;
        oldest = NULL;
        list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
                /* Make sure we don't add duplicate entries */
                if (entry->len == len &&
                    memcmp(entry->tag, tag, len) == 0)
                        found = true;
                if (!oldest || entry->seq < oldest->seq)
                        oldest = entry;
        }

        if (!found) {
                new->seq = atomic_inc_return(&vif->hash.cache.seq);
                list_add_rcu(&new->link, &vif->hash.cache.list);

                if (++vif->hash.cache.count > xenvif_hash_cache_size) {
                        list_del_rcu(&oldest->link);
                        vif->hash.cache.count--;
                        call_rcu(&oldest->rcu, xenvif_del_hash);
                }
        }

        spin_unlock_irqrestore(&vif->hash.cache.lock, flags);

        if (found)
                kfree(new);
}
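
/* Calculate the Toeplitz hash of a tag using the current key and, if
 * caching is enabled, remember the result for later lookups.
 */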
static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
                           unsigned int len)
{
        u32 val;

        val = xen_netif_toeplitz_hash(vif->hash.key,
                                      sizeof(vif->hash.key),
                                      data, len);

        if (xenvif_hash_cache_size != 0)
                xenvif_add_hash(vif, data, len, val);

        return val;
}
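
/* Drop every entry from the cache, deferring each free until readers
 * are done. Used when the key changes (stale values) and when the
 * interface is torn down.
 */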
static void xenvif_flush_hash(struct xenvif *vif)
{
        struct xenvif_hash_cache_entry *entry;
        unsigned long flags;

        if (xenvif_hash_cache_size == 0)
                return;

        spin_lock_irqsave(&vif->hash.cache.lock, flags);

        list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
                list_del_rcu(&entry->link);
                vif->hash.cache.count--;
                call_rcu(&entry->rcu, xenvif_del_hash);
        }

        spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
}
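
/* Look a tag up in the cache, falling back to a fresh calculation on a
 * miss. A hit also refreshes the entry's sequence number, protecting it
 * from LRU eviction.
 */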
static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
                            unsigned int len)
{
        struct xenvif_hash_cache_entry *entry;
        u32 val;
        bool found;

        if (len >= XEN_NETBK_HASH_TAG_SIZE)
                return 0;

        if (xenvif_hash_cache_size == 0)
                return xenvif_new_hash(vif, data, len);

        rcu_read_lock();

        found = false;

        list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
                if (entry->len == len &&
                    memcmp(entry->tag, data, len) == 0) {
                        val = entry->val;
                        entry->seq = atomic_inc_return(&vif->hash.cache.seq);
                        found = true;
                        break;
                }
        }

        rcu_read_unlock();

        if (!found)
                val = xenvif_new_hash(vif, data, len);

        return val;
}
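
/* Calculate an L3 or L4 hash over the packet's flow, according to the
 * hash types the frontend has enabled, and store it in the skb.
 */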
void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
{
        struct flow_keys flow;
        u32 hash = 0;
        enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
        u32 flags = vif->hash.flags;
        bool has_tcp_hdr;

        /* Quick rejection test: If the network protocol doesn't
         * correspond to any enabled hash type then there's no point
         * in parsing the packet header.
         */
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                             XEN_NETIF_CTRL_HASH_TYPE_IPV4))
                        break;

                goto done;

        case htons(ETH_P_IPV6):
                if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
                             XEN_NETIF_CTRL_HASH_TYPE_IPV6))
                        break;

                goto done;

        default:
                goto done;
        }

        memset(&flow, 0, sizeof(flow));
        if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
                goto done;

        has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
                      !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (has_tcp_hdr &&
                    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
                        u8 data[12];

                        memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
                        memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
                        memcpy(&data[8], &flow.ports.src, 2);
                        memcpy(&data[10], &flow.ports.dst, 2);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L4;
                } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
                        u8 data[8];

                        memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
                        memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L3;
                }

                break;

        case htons(ETH_P_IPV6):
                if (has_tcp_hdr &&
                    (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
                        u8 data[36];

                        memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
                        memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
                        memcpy(&data[32], &flow.ports.src, 2);
                        memcpy(&data[34], &flow.ports.dst, 2);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L4;
                } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
                        u8 data[32];

                        memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
                        memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);

                        hash = xenvif_find_hash(vif, data, sizeof(data));
                        type = PKT_HASH_TYPE_L3;
                }

                break;
        }

done:
        if (type == PKT_HASH_TYPE_NONE)
                skb_clear_hash(skb);
        else
                __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
}
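
/* Control ring op: select the hash algorithm. Only "none" and Toeplitz
 * are recognized.
 */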
u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
{
        switch (alg) {
        case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
        case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
                break;

        default:
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        vif->hash.alg = alg;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
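
/* Control ring op: report the hash types the backend can calculate.
 * Not supported unless a hash algorithm has been selected.
 */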
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
{
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;

        *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
                 XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
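
/* Control ring op: enable a subset of the supported hash types. */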
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
{
        if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
                      XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        vif->hash.flags = flags;

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
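
/* Control ring op: grant-copy a new hash key from the frontend, then
 * flush the cache, since entries calculated with the old key are stale.
 */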
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
{
        u8 *key = vif->hash.key;
        struct gnttab_copy copy_op = {
                .source.u.ref = gref,
                .source.domid = vif->domid,
                .dest.u.gmfn = virt_to_gfn(key),
                .dest.domid = DOMID_SELF,
                .dest.offset = xen_offset_in_page(key),
                .len = len,
                .flags = GNTCOPY_source_gref
        };

        if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        if (copy_op.len != 0) {
                gnttab_batch_copy(&copy_op, 1);

                if (copy_op.status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        /* Clear any remaining key octets */
        if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
                memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);

        xenvif_flush_hash(vif);

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
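
/* Control ring op: set the number of valid entries in the hash-to-queue
 * mapping table and zero it, so all hash values initially select queue 0.
 */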
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
{
        if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        vif->hash.size = size;
        memset(vif->hash.mapping, 0, sizeof(u32) * size);

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
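
/* Control ring op: grant-copy a section of the hash-to-queue mapping
 * table from the frontend, validating mapping entries against
 * vif->num_queues.
 */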
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
{
        u32 *mapping = &vif->hash.mapping[off];
        struct gnttab_copy copy_op = {
                .source.u.ref = gref,
                .source.domid = vif->domid,
                .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
                .dest.offset = xen_offset_in_page(mapping),
                .len = len * sizeof(u32),
                .flags = GNTCOPY_source_gref
        };

        if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        while (len-- != 0)
                if (mapping[off++] >= vif->num_queues)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;

        if (copy_op.len != 0) {
                gnttab_batch_copy(&copy_op, 1);

                if (copy_op.status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }

        return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
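
/* Initialize the hash cache; a xenvif_hash_cache_size of zero disables
 * caching entirely.
 */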
void xenvif_init_hash(struct xenvif *vif)
{
        if (xenvif_hash_cache_size == 0)
                return;

        spin_lock_init(&vif->hash.cache.lock);
        INIT_LIST_HEAD(&vif->hash.cache.list);
}
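
/* Release any remaining cache entries when the interface is destroyed. */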
void xenvif_deinit_hash(struct xenvif *vif)
{
        xenvif_flush_hash(vif);
}