// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>

#if IS_ENABLED(CONFIG_PSAMPLE)
#include <net/psample.h>
#endif

#include <net/sctp/checksum.h>

#include "conntrack.h"
#include "flow_netlink.h"
#include "openvswitch_trace.h"
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};

#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);
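/* Editor's note on the sizing above, as the helpers below suggest: with
 * OVS_RECURSION_LIMIT of 5 and OVS_DEFERRED_ACTION_THRESHOLD of 3, recursion
 * levels 1-3 get a private sw_flow_key slot from 'flow_keys' and run inline,
 * levels 4-5 fall back to the per-CPU 'action_fifos' queue, and anything
 * deeper is dropped by ovs_execute_actions().
 */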
85 /* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
86 * space. Return NULL if out of key spaces.
88 static struct sw_flow_key
*clone_key(const struct sw_flow_key
*key_
)
90 struct action_flow_keys
*keys
= this_cpu_ptr(flow_keys
);
91 int level
= this_cpu_read(exec_actions_level
);
92 struct sw_flow_key
*key
= NULL
;
94 if (level
<= OVS_DEFERRED_ACTION_THRESHOLD
) {
95 key
= &keys
->key
[level
- 1];
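/* Sketch of the slot selection above, assuming exec_actions_level has already
 * been incremented for the current call: level 1 uses keys->key[0], level 2
 * uses keys->key[1], level 3 uses keys->key[2]; any deeper level gets NULL
 * back and the caller has to defer or drop.
 */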
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Return NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	int err;

	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	err = skb_vlan_push(skb, vlan->vlan_tpid,
			    ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
	skb_reset_mac_len(skb);
	return err;
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_eth_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	int err;

	err = skb_eth_push(skb, ethh->addresses.eth_dst,
			   ethh->addresses.eth_src);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}
static noinline_for_stack int push_nsh(struct sk_buff *skb,
				       struct sw_flow_key *key,
				       const struct nlattr *a)
{
	u8 buffer[NSH_HDR_MAX_LEN];
	struct nshhdr *nh = (struct nshhdr *)buffer;
	int err;

	err = nsh_hdr_from_nlattr(a, nh, NSH_HDR_MAX_LEN);
	if (err)
		return err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
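/* Note on the UDP branch above: a zero UDP checksum means "no checksum", so
 * the checksum is only rewritten when one is already in use (or the skb is
 * CHECKSUM_PARTIAL), and a result of zero is folded to CSUM_MANGLED_0 (0xffff)
 * so the packet does not suddenly look checksum-less.
 */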
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_dsfield(struct sk_buff *skb, struct ipv6hdr *nh, u8 ipv6_tclass, u8 mask)
{
	u8 old_ipv6_tclass = ipv6_get_dsfield(nh);

	ipv6_tclass = OVS_MASKED(old_ipv6_tclass, ipv6_tclass, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(old_ipv6_tclass << 12),
			     (__force __wsum)(ipv6_tclass << 12));

	ipv6_change_dsfield(nh, ~mask, ipv6_tclass);
}
static void set_ipv6_fl(struct sk_buff *skb, struct ipv6hdr *nh, u32 fl, u32 mask)
{
	u32 ofl;

	ofl = nh->flow_lbl[0] << 16 | nh->flow_lbl[1] << 8 | nh->flow_lbl[2];
	fl = OVS_MASKED(ofl, fl, mask);

	/* Bits 21-24 are always unmasked, so this retains their values. */
	nh->flow_lbl[0] = (u8)(fl >> 16);
	nh->flow_lbl[1] = (u8)(fl >> 8);
	nh->flow_lbl[2] = (u8)fl;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)htonl(ofl), (__force __wsum)htonl(fl));
}
static void set_ipv6_ttl(struct sk_buff *skb, struct ipv6hdr *nh, u8 new_ttl, u8 mask)
{
	new_ttl = OVS_MASKED(nh->hop_limit, new_ttl, mask);

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		csum_replace(&skb->csum, (__force __wsum)(nh->hop_limit << 8),
			     (__force __wsum)(new_ttl << 8));
	nh->hop_limit = new_ttl;
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP addresses is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		set_ipv6_dsfield(skb, nh, key->ipv6_tclass, mask->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(skb, nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		set_ipv6_ttl(skb, nh, key->ipv6_hlimit, mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	ovs_ct_clear(skb, NULL);
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
		ovs_ct_clear(skb, NULL);
	}

	skb_clear_hash(skb);

	return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	ovs_ct_clear(skb, NULL);

	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
static int ovs_vport_output(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return 0;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
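/* The dummy dst_ops above only has to answer MTU queries: ovs_fragment()
 * below attaches a stack-allocated rtable/rt6_info whose dst uses these ops,
 * which is enough for the stock ip_do_fragment()/ipv6_fragment() helpers to
 * be reused on a packet that never went through the routing layer.
 */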
/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
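/* Editor's note: prepare_frag() parks the L2 header and OVS metadata in the
 * per-CPU ovs_frag_data_storage and strips the header from the skb;
 * ovs_vport_output() later restores both onto every fragment.  This relies on
 * fragmentation running to completion on the same CPU before anything else
 * can reuse that per-CPU slot.
 */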
static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	enum ovs_drop_reason reason;
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		reason = OVS_DROP_FRAG_L2_TOO_LONG;
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct rtable ovs_rt = { 0 };
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		reason = OVS_DROP_FRAG_INVALID_PROTO;
		goto err;
	}

	return;
err:
	ovs_kfree_skb_reason(skb, reason);
}
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport && netif_carrier_ok(vport->dev))) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		/* Need to set the pkt_type to involve the routing layer. The
		 * packet movement through the OVS datapath doesn't generally
		 * use routing, but this is needed for tunnel cases.
		 */
		skb->pkt_type = PACKET_OUTGOING;

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_BIG);
		}
	} else {
		kfree_skb_reason(skb, SKB_DROP_REASON_DEV_READY);
	}
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			if (dp->user_features &
			    OVS_DP_F_DISPATCH_UPCALL_PER_CPU)
				upcall.portid =
					ovs_dp_get_upcall_portid(dp,
								 smp_processor_id());
			else
				upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info =
						skb_tunnel_info(skb);
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr)
{
	/* The first attribute is always 'OVS_DEC_TTL_ATTR_ACTION'. */
	struct nlattr *actions = nla_data(attr);

	if (nla_len(actions))
		return clone_execute(dp, skb, key, 0, nla_data(actions),
				     nla_len(actions), true, false);

	ovs_kfree_skb_reason(skb, OVS_DROP_IP_TTL);
	return 0;
}
/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	u32 init_probability;
	bool clone_flow_key;
	int err;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);
	init_probability = OVS_CB(skb)->probability;

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || get_random_u32() > arg->probability)) {
		if (last)
			ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
		return 0;
	}

	OVS_CB(skb)->probability = arg->probability;

	clone_flow_key = !arg->exec;
	err = clone_execute(dp, skb, key, 0, actions, rem, last,
			    clone_flow_key);

	if (!last)
		OVS_CB(skb)->probability = init_probability;

	return err;
}
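/* Probability handling above: arg->probability of U32_MAX means "always
 * sample"; otherwise the packet is skipped when a random u32 exceeds it, and
 * a probability of 0 never samples.  OVS_CB(skb)->probability is temporarily
 * overwritten so a nested psample action can report the effective rate, and
 * is restored afterwards when the skb is not consumed.
 */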
/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
		/* OVS_HASH_ALG_L4 hashing type. */
		hash = skb_get_hash(skb);
	} else if (hash_act->hash_alg == OVS_HASH_ALG_SYM_L4) {
		/* OVS_HASH_ALG_SYM_L4 hashing type.  NOTE: this doesn't
		 * extend past an encapsulated header.
		 */
		hash = __skb_get_hash_symmetric(skb);
	}

	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
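/* Layout assumed by get_mask(): a masked-set attribute carries the value
 * followed immediately by a mask of the same type, so "+ 1" on the typed
 * pointer lands on the mask.  OVS_SET_MASKED(x, v, m) then keeps the bits
 * outside the mask; for illustration it behaves like:
 *
 *	x = (v & m) | (x & ~m);
 */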
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}
static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
	const struct nlattr *actions, *cpl_arg;
	int len, max_len, rem = nla_len(attr);
	const struct check_pkt_len_arg *arg;
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
	max_len = arg->pkt_len;

	if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
	    len <= max_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}
static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}
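/* execute_dec_ttl() reports an expiring TTL/hop limit with -EHOSTUNREACH;
 * do_execute_actions() turns that into dec_ttl_exception_handler(), which
 * either runs the nested OVS_DEC_TTL_ATTR_ACTION list on the packet or frees
 * it with OVS_DROP_IP_TTL.  Non-IP packets fall through unchanged.
 */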
#if IS_ENABLED(CONFIG_PSAMPLE)
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct psample_group psample_group = {};
	struct psample_metadata md = {};
	const struct nlattr *a;
	u32 rate;
	int rem;

	nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) {
		switch (nla_type(a)) {
		case OVS_PSAMPLE_ATTR_GROUP:
			psample_group.group_num = nla_get_u32(a);
			break;

		case OVS_PSAMPLE_ATTR_COOKIE:
			md.user_cookie = nla_data(a);
			md.user_cookie_len = nla_len(a);
			break;
		}
	}

	psample_group.net = ovs_dp_get_net(dp);
	md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex;
	md.trunc_size = skb->len - OVS_CB(skb)->cutlen;
	md.rate_as_probability = 1;

	rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX;

	psample_sample_packet(&psample_group, skb, rate, &md);
}
#else
static void execute_psample(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{}
#endif
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (trace_ovs_do_execute_action_enabled())
			trace_ovs_do_execute_action(dp, skb, key, a, rem);

		/* Actions that rightfully have to consume the skb should do it
		 * and return directly.
		 */
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb', In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output.
				 */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}

		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}

		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			err = push_nsh(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				ovs_kfree_skb_reason(skb, OVS_DROP_METER);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;

			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH)
				return dec_ttl_exception_handler(dp, skb,
								 key, a);
			break;

		case OVS_ACTION_ATTR_DROP: {
			enum ovs_drop_reason reason = nla_get_u32(a)
				? OVS_DROP_EXPLICIT_WITH_ERROR
				: OVS_DROP_EXPLICIT;

			ovs_kfree_skb_reason(skb, reason);
			return 0;
		}

		case OVS_ACTION_ATTR_PSAMPLE:
			execute_psample(dp, skb, a);
			OVS_CB(skb)->cutlen = 0;
			if (nla_is_last(a, rem)) {
				consume_skb(skb);
				return 0;
			}
			break;
		}

		if (unlikely(err)) {
			ovs_kfree_skb_reason(skb, OVS_DROP_ACTION_ERROR);
			return err;
		}
	}

	ovs_kfree_skb_reason(skb, OVS_DROP_LAST_ACTION);
	return 0;
}
/* Execute the actions on the clone of the packet.  The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action.
		 */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be change
	 * by the actions, then the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}

		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		ovs_kfree_skb_reason(skb, OVS_DROP_DEFERRED_LIMIT);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else {  /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action (recirc_id=%#x)\n",
					ovs_dp_name(dp), recirc_id);
			}
		}
	}
	return 0;
}
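/* Decision order in clone_execute(), summarised: (1) clone the skb unless
 * this is the last action; (2) grab a per-level key via clone_key() and run
 * the actions (or recirculation) inline; (3) if no key slot is left, queue a
 * deferred_action instead; (4) if the per-CPU FIFO is also full, drop the
 * packet with OVS_DROP_DEFERRED_LIMIT and rate-limit a warning.
 */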
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there is no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finishing executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet.  */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		ovs_kfree_skb_reason(skb, OVS_DROP_RECURSION_LIMIT);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}
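/* Only the outermost call (level == 1) drains the deferred-action FIFO, so
 * work queued by nested recirculation or sampling runs after the original
 * action list finishes, keeping kernel stack usage bounded.
 */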
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}