// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - Cambridge Greys Limited
 * Copyright (C) 2011 - 2014 Cisco Systems Inc
 */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <uapi/linux/ip.h>
#include <uapi/linux/virtio_net.h>
#include <linux/virtio_net.h>
#include <linux/virtio_byteorder.h>
#include <linux/netdev_features.h>
#include "vector_user.h"
#include "vector_kern.h"

#define GOOD_LINEAR 512
#define GSO_ERROR "Incoming GSO frames and GRO disabled on the interface"

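/*
 * Per-transport private state.  The *_offset fields record the byte offsets
 * of the optional elements inside the encapsulation header that the
 * form_header()/verify_header() callbacks below write and check; they are
 * filled in by the build_*_transport_data() helpers from the command line
 * options.
 */
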
struct gre_minimal_header {
	uint16_t header;
	uint16_t arptype;
};

struct uml_gre_data {
	uint32_t rx_key;
	uint32_t tx_key;
	uint32_t sequence;

	bool ipv6;
	bool has_sequence;
	bool pin_sequence;
	bool checksum;
	bool key;

	struct gre_minimal_header expected_header;

	uint32_t checksum_offset;
	uint32_t key_offset;
	uint32_t sequence_offset;
};

struct uml_l2tpv3_data {
	uint64_t rx_cookie;
	uint64_t tx_cookie;
	uint32_t rx_session;
	uint32_t tx_session;
	uint32_t counter;

	bool udp;
	bool ipv6;
	bool has_counter;
	bool pin_counter;
	bool cookie;
	bool cookie_is_64;

	uint32_t cookie_offset;
	uint32_t session_offset;
	uint32_t counter_offset;
};

static int l2tpv3_form_header(uint8_t *header,
	struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *counter;

	if (td->udp)
		*(uint32_t *) header = cpu_to_be32(L2TPV3_DATA_PACKET);
	(*(uint32_t *) (header + td->session_offset)) = td->tx_session;

	if (td->cookie) {
		if (td->cookie_is_64)
			(*(uint64_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
		else
			(*(uint32_t *)(header + td->cookie_offset)) =
				td->tx_cookie;
	}
	if (td->has_counter) {
		counter = (uint32_t *)(header + td->counter_offset);
		if (td->pin_counter) {
			*counter = 0;
		} else {
			td->counter++;
			*counter = cpu_to_be32(td->counter);
		}
	}
	return 0;
}

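/*
 * The header built above follows the offsets computed in
 * build_l2tpv3_transport_data(): a 32-bit session id at td->session_offset,
 * an optional 32/64-bit cookie at td->cookie_offset and an optional 32-bit
 * counter at td->counter_offset.  When running over UDP the whole layout is
 * shifted by 4 bytes so the L2TPV3_DATA_PACKET marker can occupy the first
 * word.
 */
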
static int gre_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_gre_data *td = vp->transport_data;
	uint32_t *sequence;

	*((uint32_t *) header) = *((uint32_t *) &td->expected_header);
	if (td->key)
		(*(uint32_t *) (header + td->key_offset)) = td->tx_key;
	if (td->has_sequence) {
		sequence = (uint32_t *)(header + td->sequence_offset);
		if (td->pin_sequence)
			*sequence = 0;
		else
			*sequence = cpu_to_be32(++td->sequence);
	}
	return 0;
}

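/*
 * GRE framing used here: the first 4 bytes are the fixed header word
 * prepared in td->expected_header (mode flags plus the GRE_IRB protocol
 * type), optionally followed by a 4-byte key at td->key_offset and a
 * 4-byte sequence number at td->sequence_offset, matching the offsets set
 * up in build_gre_transport_data().
 */
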
static int raw_form_header(uint8_t *header,
		struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	virtio_net_hdr_from_skb(
		skb,
		vheader,
		virtio_legacy_is_little_endian(),
		false,
		0
	);

	return 0;
}

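/*
 * For the raw, tap and hybrid transports the "header" is a virtio_net_hdr
 * placed in front of each frame; virtio_net_hdr_from_skb() fills in the GSO
 * type/size and checksum offload metadata so the other end of the tap/raw
 * socket can finish segmentation and checksumming.
 */
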
static int l2tpv3_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct uml_l2tpv3_data *td = vp->transport_data;
	uint32_t *session;
	uint64_t cookie;

	if ((!td->udp) && (!td->ipv6))
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	/* we do not do a strict check for "data" packets as per
	 * the RFC spec because the pure IP spec does not have
	 * that anyway.
	 */

	if (td->cookie) {
		if (td->cookie_is_64)
			cookie = *(uint64_t *)(header + td->cookie_offset);
		else
			cookie = *(uint32_t *)(header + td->cookie_offset);
		if (cookie != td->rx_cookie) {
			if (net_ratelimit())
				netdev_err(vp->dev, "uml_l2tpv3: unknown cookie id");
			return -1;
		}
	}
	session = (uint32_t *) (header + td->session_offset);
	if (*session != td->rx_session) {
		if (net_ratelimit())
			netdev_err(vp->dev, "uml_l2tpv3: session mismatch");
		return -1;
	}
	return 0;
}

static int gre_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	uint32_t key;
	struct uml_gre_data *td = vp->transport_data;

	if (!td->ipv6)
		header += sizeof(struct iphdr) /* fix for ipv4 raw */;

	if (*((uint32_t *) header) != *((uint32_t *) &td->expected_header)) {
		if (net_ratelimit())
			netdev_err(vp->dev, "header type disagreement, expecting %0x, got %0x",
				*((uint32_t *) &td->expected_header),
				*((uint32_t *) header)
			);
		return -1;
	}

	if (td->key) {
		key = (*(uint32_t *)(header + td->key_offset));
		if (key != td->rx_key) {
			if (net_ratelimit())
				netdev_err(vp->dev, "unknown key id %0x, expecting %0x",
						key, td->rx_key);
			return -1;
		}
	}
	return 0;
}

static int raw_verify_header(
	uint8_t *header, struct sk_buff *skb, struct vector_private *vp)
{
	struct virtio_net_hdr *vheader = (struct virtio_net_hdr *) header;

	if ((vheader->gso_type != VIRTIO_NET_HDR_GSO_NONE) &&
		(vp->req_size != 65536)) {
		if (net_ratelimit())
			netdev_err(
				vp->dev,
				GSO_ERROR
		);
	}
	if ((vheader->flags & VIRTIO_NET_HDR_F_DATA_VALID) > 0)
		return 1;

	virtio_net_hdr_to_skb(skb, vheader, virtio_legacy_is_little_endian());

	return 0;
}

static bool get_uint_param(
	struct arglist *def, char *param, unsigned int *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtouint(arg, 0, result) == 0)
			return true;
	}
	return false;
}

static bool get_ulong_param(
	struct arglist *def, char *param, unsigned long *result)
{
	char *arg = uml_vector_fetch_arg(def, param);

	if (arg != NULL) {
		if (kstrtoul(arg, 0, result) == 0)
			return true;
	}
	return false;
}

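/*
 * Both helpers above fetch a named option from the parsed device command
 * line and convert it to a number, returning false when the option is
 * absent or malformed.  As an illustrative example, a device string along
 * the lines of "transport=gre,rx_key=1,tx_key=2" would make
 * get_uint_param(vp->parsed, "rx_key", &v) return true with v == 1.
 */
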
static int build_gre_transport_data(struct vector_private *vp)
{
	struct uml_gre_data *td;
	unsigned int temp_int;
	unsigned int temp_rx;
	unsigned int temp_tx;

	vp->transport_data = kmalloc(sizeof(struct uml_gre_data), GFP_KERNEL);
	if (vp->transport_data == NULL)
		return -ENOMEM;
	td = vp->transport_data;
	td->sequence = 0;

	td->expected_header.arptype = GRE_IRB;
	td->expected_header.header = 0;

	vp->form_header = &gre_form_header;
	vp->verify_header = &gre_verify_header;
	vp->header_size = 4;
	td->key_offset = 4;
	td->sequence_offset = 4;
	td->checksum_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}
	td->key = false;
	if (get_uint_param(vp->parsed, "rx_key", &temp_rx)) {
		if (get_uint_param(vp->parsed, "tx_key", &temp_tx)) {
			td->key = true;
			td->expected_header.header |= GRE_MODE_KEY;
			td->rx_key = cpu_to_be32(temp_rx);
			td->tx_key = cpu_to_be32(temp_tx);
			vp->header_size += 4;
			td->sequence_offset += 4;
		} else {
			return -EINVAL;
		}
	}

	td->has_sequence = false;
	td->pin_sequence = false;
	if (get_uint_param(vp->parsed, "sequence", &temp_int)) {
		if (temp_int > 0) {
			vp->header_size += 4;
			td->has_sequence = true;
			td->expected_header.header |= GRE_MODE_SEQUENCE;
			if (get_uint_param(
				vp->parsed, "pin_sequence", &temp_int)) {
				if (temp_int > 0)
					td->pin_sequence = true;
			}
		}
	}
	vp->rx_header_size = vp->header_size;
	if (!td->ipv6)
		vp->rx_header_size += sizeof(struct iphdr);
	return 0;
}

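/*
 * Worked example of the arithmetic above: GRE with both a key and a
 * sequence number ends up with vp->header_size = 4 + 4 + 4 = 12 bytes, and
 * for IPv4 the receive path reserves an extra struct iphdr in front of that
 * because the raw socket delivers the IP header as well (see the
 * "fix for ipv4 raw" adjustment in gre_verify_header()).
 */
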
static int build_l2tpv3_transport_data(struct vector_private *vp)
{
	struct uml_l2tpv3_data *td;
	unsigned int temp_int, temp_rxs, temp_txs;
	unsigned long temp_rx;
	unsigned long temp_tx;

	vp->transport_data = kmalloc(
		sizeof(struct uml_l2tpv3_data), GFP_KERNEL);

	if (vp->transport_data == NULL)
		return -ENOMEM;

	td = vp->transport_data;

	vp->form_header = &l2tpv3_form_header;
	vp->verify_header = &l2tpv3_verify_header;
	td->counter = 0;

	vp->header_size = 4;
	td->session_offset = 0;
	td->cookie_offset = 4;
	td->counter_offset = 4;

	td->ipv6 = false;
	if (get_uint_param(vp->parsed, "v6", &temp_int)) {
		if (temp_int > 0)
			td->ipv6 = true;
	}

	if (get_uint_param(vp->parsed, "rx_session", &temp_rxs)) {
		if (get_uint_param(vp->parsed, "tx_session", &temp_txs)) {
			td->tx_session = cpu_to_be32(temp_txs);
			td->rx_session = cpu_to_be32(temp_rxs);
		} else {
			return -EINVAL;
		}
	} else {
		return -EINVAL;
	}

	td->cookie_is_64 = false;
	if (get_uint_param(vp->parsed, "cookie64", &temp_int)) {
		if (temp_int > 0)
			td->cookie_is_64 = true;
	}
	td->cookie = false;
	if (get_ulong_param(vp->parsed, "rx_cookie", &temp_rx)) {
		if (get_ulong_param(vp->parsed, "tx_cookie", &temp_tx)) {
			td->cookie = true;
			if (td->cookie_is_64) {
				td->rx_cookie = cpu_to_be64(temp_rx);
				td->tx_cookie = cpu_to_be64(temp_tx);
				vp->header_size += 8;
				td->counter_offset += 8;
			} else {
				td->rx_cookie = cpu_to_be32(temp_rx);
				td->tx_cookie = cpu_to_be32(temp_tx);
				vp->header_size += 4;
				td->counter_offset += 4;
			}
		} else {
			return -EINVAL;
		}
	}

	td->has_counter = false;
	td->pin_counter = false;
	if (get_uint_param(vp->parsed, "counter", &temp_int)) {
		if (temp_int > 0) {
			td->has_counter = true;
			vp->header_size += 4;
			if (get_uint_param(
				vp->parsed, "pin_counter", &temp_int)) {
				if (temp_int > 0)
					td->pin_counter = true;
			}
		}
	}

	td->udp = false;
	if (get_uint_param(vp->parsed, "udp", &temp_int)) {
		if (temp_int > 0) {
			td->udp = true;
			vp->header_size += 4;
			td->counter_offset += 4;
			td->session_offset += 4;
			td->cookie_offset += 4;
		}
	}

	vp->rx_header_size = vp->header_size;
	if ((!td->ipv6) && (!td->udp))
		vp->rx_header_size += sizeof(struct iphdr);

	return 0;
}

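/*
 * Worked example of the offsets above: L2TPv3 over UDP with 64-bit cookies
 * and a counter gives session_offset = 4, cookie_offset = 8,
 * counter_offset = 16 and vp->header_size = 4 + 8 + 4 + 4 = 20 bytes.
 */
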
static int build_raw_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		if (!uml_raw_enable_vnet_headers(vp->fds->tx_fd))
			return -1;
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |= (NETIF_F_TSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"raw: using vnet headers for tso and tx/rx checksum"
		);
	}
	return 0;
}

static int build_hybrid_transport_data(struct vector_private *vp)
{
	if (uml_raw_enable_vnet_headers(vp->fds->rx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap/raw hybrid: using vnet headers for tso and tx/rx checksum"
		);
	} else {
		return 0; /* do not try to enable tap too if raw failed */
	}
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd))
		return 0;
	return -1;
}

static int build_tap_transport_data(struct vector_private *vp)
{
	/* "Pure" tap uses the same fd for rx and tx */
	if (uml_tap_enable_vnet_headers(vp->fds->tx_fd)) {
		vp->form_header = &raw_form_header;
		vp->verify_header = &raw_verify_header;
		vp->header_size = sizeof(struct virtio_net_hdr);
		vp->rx_header_size = sizeof(struct virtio_net_hdr);
		vp->dev->hw_features |=
			(NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		vp->dev->features |=
			(NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
				NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GRO);
		netdev_info(
			vp->dev,
			"tap: using vnet headers for tso and tx/rx checksum"
		);
		return 0;
	}
	return -1;
}

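/*
 * raw, tap and hybrid all share raw_form_header()/raw_verify_header(); they
 * differ only in which file descriptor(s) need vnet headers enabled and in
 * whether a failed probe is fatal (-1) or silently falls back to running
 * without offloads (0).
 */
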
static int build_bess_transport_data(struct vector_private *vp)
{
	vp->form_header = NULL;
	vp->verify_header = NULL;
	vp->header_size = 0;
	vp->rx_header_size = 0;
	return 0;
}

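/*
 * The BESS transport adds no encapsulation header of its own, so there is
 * nothing to form or verify and both header sizes stay zero.
 */
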
int build_transport_data(struct vector_private *vp)
{
	char *transport = uml_vector_fetch_arg(vp->parsed, "transport");

	if (transport == NULL)
		return -EINVAL;
	if (strncmp(transport, TRANS_GRE, TRANS_GRE_LEN) == 0)
		return build_gre_transport_data(vp);
	if (strncmp(transport, TRANS_L2TPV3, TRANS_L2TPV3_LEN) == 0)
		return build_l2tpv3_transport_data(vp);
	if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
		return build_raw_transport_data(vp);
	if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
		return build_tap_transport_data(vp);
	if (strncmp(transport, TRANS_HYBRID, TRANS_HYBRID_LEN) == 0)
		return build_hybrid_transport_data(vp);
	if (strncmp(transport, TRANS_BESS, TRANS_BESS_LEN) == 0)
		return build_bess_transport_data(vp);
	return 0;
}