// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip6_fib.h>
#include <net/checksum.h>
#include <net/dsfield.h>

#include <net/sctp/checksum.h>

#include "conntrack.h"
#include "flow_netlink.h"
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;
	int actions_len;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
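
/* Per-CPU scratch state used while a packet is handed to the IP stack for
 * fragmentation: prepare_frag() saves the original L2 header and skb
 * metadata here, and ovs_vport_output() restores them on every fragment.
 */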
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_skb_cb cb;
	__be16 inner_protocol;
	u16 network_offset;	/* valid only for MPLS */
	u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 mac_proto;
	u8 l2_data[MAX_L2_LEN];
};

static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);

#define DEFERRED_ACTION_FIFO_SIZE 10
#define OVS_RECURSION_LIMIT 5
#define OVS_DEFERRED_ACTION_THRESHOLD (OVS_RECURSION_LIMIT - 2)
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

struct action_flow_keys {
	struct sw_flow_key key[OVS_DEFERRED_ACTION_THRESHOLD];
};

static struct action_fifo __percpu *action_fifos;
static struct action_flow_keys __percpu *flow_keys;
static DEFINE_PER_CPU(int, exec_actions_level);

/* Make a clone of the 'key', using the pre-allocated percpu 'flow_keys'
 * space. Return NULL if out of key spaces.
 */
static struct sw_flow_key *clone_key(const struct sw_flow_key *key_)
{
	struct action_flow_keys *keys = this_cpu_ptr(flow_keys);
	int level = this_cpu_read(exec_actions_level);
	struct sw_flow_key *key = NULL;

	if (level <= OVS_DEFERRED_ACTION_THRESHOLD) {
		key = &keys->key[level - 1];
		memcpy(key, key_, sizeof(*key));
	}

	return key;
}
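
/* Simple per-CPU ring of deferred actions: _put() reserves the next free
 * slot at 'head' (or returns NULL when the fifo is full) and _get()
 * consumes entries from 'tail' until the fifo has been drained.
 */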
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}

/* Return the reserved fifo entry, or NULL if the fifo is full. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *actions,
						    const int actions_len)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = actions;
		da->actions_len = actions_len;
		da->pkt_key = *key;
	}

	return da;
}

static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->mac_proto |= SW_FLOW_KEY_INVALID;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !(key->mac_proto & SW_FLOW_KEY_INVALID);
}

static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key,
			 u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key);

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
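
/* Push an MPLS label stack entry below the current L2 header.  With a zero
 * 'mac_len' the label becomes the outermost header and the flow key's
 * mac_proto is cleared to MAC_PROTO_NONE.
 */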
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     __be32 mpls_lse, __be16 mpls_ethertype, __u16 mac_len)
{
	int err;

	err = skb_mpls_push(skb, mpls_lse, mpls_ethertype, mac_len, !!mac_len);
	if (err)
		return err;

	if (!mac_len)
		key->mac_proto = MAC_PROTO_NONE;

	invalidate_flow_key(key);
	return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	int err;

	err = skb_mpls_pop(skb, ethertype, skb->mac_len,
			   ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET);
	if (err)
		return err;

	if (ethertype == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;

	invalidate_flow_key(key);
	return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	struct mpls_shim_hdr *stack;
	__be32 lse;
	int err;

	stack = mpls_hdr(skb);
	lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask);
	err = skb_mpls_update_lse(skb, lse);
	if (err)
		return err;

	flow_key->mpls.lse[0] = lse;
	return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = 0;
		key->eth.vlan.tpid = 0;
	}
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb)) {
		invalidate_flow_key(key);
	} else {
		key->eth.vlan.tci = vlan->vlan_tci;
		key->eth.vlan.tpid = vlan->vlan_tpid;
	}
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK);
}

/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
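
/* Masked rewrite of the Ethernet source/destination addresses, keeping the
 * skb checksum and the cached flow key in sync with the new header.
 */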
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}

/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_action_push_eth *ethh)
{
	struct ethhdr *hdr;

	/* Add the new Ethernet header */
	if (skb_cow_head(skb, ETH_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	hdr = eth_hdr(skb);
	ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
	ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
	hdr->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, hdr, ETH_HLEN);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_ETHERNET;
	invalidate_flow_key(key);
	return 0;
}

static int push_nsh(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct nshhdr *nh)
{
	int err;

	err = nsh_push(skb, nh);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}

static int pop_nsh(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = nsh_pop(skb);
	if (err)
		return err;

	/* safe right before invalidate_flow_key */
	if (skb->protocol == htons(ETH_P_TEB))
		key->mac_proto = MAC_PROTO_ETHERNET;
	else
		key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
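
/* Fix up the L4 checksum after an IPv4 address rewrite.  Non-first
 * fragments carry no L4 header, and a zero UDP checksum is left untouched
 * unless the packet is using CHECKSUM_PARTIAL offload.
 */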
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, true);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, true);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, true);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, true);
	}
}

static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
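
/* Masked set() of the IPv4 header fields.  Address rewrites also update the
 * affected L4 checksum via set_ip_addr().
 */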
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}

static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
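
/* Masked set() of the IPv6 header fields.  A destination rewrite skips the
 * L4 checksum update when a routing extension header is still present.
 */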
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on them in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
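
/* Masked set() of the NSH base header and, for MD type 1, the fixed-size
 * context headers, keeping the cached flow key in sync.
 */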
static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct nlattr *a)
{
	struct nshhdr *nh;
	size_t length;
	int err;
	u8 flags;
	u8 ttl;
	int i;

	struct ovs_key_nsh key;
	struct ovs_key_nsh mask;

	err = nsh_key_from_nlattr(a, &key, &mask);
	if (err)
		return err;

	/* Make sure the NSH base header is there */
	if (!pskb_may_pull(skb, skb_network_offset(skb) + NSH_BASE_HDR_LEN))
		return -ENOMEM;

	nh = nsh_hdr(skb);
	length = nsh_hdr_len(nh);

	/* Make sure the whole NSH header is there */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  length);
	if (unlikely(err))
		return err;

	nh = nsh_hdr(skb);
	skb_postpull_rcsum(skb, nh, length);
	flags = nsh_get_flags(nh);
	flags = OVS_MASKED(flags, key.base.flags, mask.base.flags);
	flow_key->nsh.base.flags = flags;
	ttl = nsh_get_ttl(nh);
	ttl = OVS_MASKED(ttl, key.base.ttl, mask.base.ttl);
	flow_key->nsh.base.ttl = ttl;
	nsh_set_flags_and_ttl(nh, flags, ttl);
	nh->path_hdr = OVS_MASKED(nh->path_hdr, key.base.path_hdr,
				  mask.base.path_hdr);
	flow_key->nsh.base.path_hdr = nh->path_hdr;
	switch (nh->mdtype) {
	case NSH_M_TYPE1:
		for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++) {
			nh->md1.context[i] =
			    OVS_MASKED(nh->md1.context[i], key.context[i],
				       mask.context[i]);
		}
		memcpy(flow_key->nsh.context, nh->md1.context,
		       sizeof(nh->md1.context));
		break;
	case NSH_M_TYPE2:
		memset(flow_key->nsh.context, 0,
		       sizeof(flow_key->nsh.context));
		break;
	default:
		return -EINVAL;
	}
	skb_postpush_rcsum(skb, nh, length);
	return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, false);
	*port = new_port;
}
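
/* Masked rewrite of the UDP ports.  The checksum is updated incrementally
 * when one is in use; otherwise the ports are patched directly.
 */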
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);

	return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}

static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
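
/* Output callback handed to the IP fragmentation code: restore the saved
 * L2 header and skb metadata from ovs_frag_data_storage and transmit the
 * fragment on the stashed vport.
 */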
static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_CB(skb) = data->cb;
	skb->inner_protocol = data->inner_protocol;
	if (data->vlan_tci & VLAN_CFI_MASK)
		__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci & ~VLAN_CFI_MASK);
	else
		__vlan_hwaccel_clear_tag(skb);

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	if (eth_p_mpls(skb->protocol)) {
		skb->inner_network_header = skb->network_header;
		skb_set_network_header(skb, data->network_offset);
		skb_reset_mac_len(skb);
	}

	ovs_vport_send(vport, skb, data->mac_proto);
	return 0;
}

static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.mtu = ovs_dst_get_mtu,
};

/* prepare_frag() is called once per (larger-than-MTU) frame; its inverse is
 * ovs_vport_output(), which is called once per fragmented packet.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb,
			 u16 orig_network_offset, u8 mac_proto)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = this_cpu_ptr(&ovs_frag_data_storage);
	data->dst = skb->_skb_refdst;
	data->vport = vport;
	data->cb = *OVS_CB(skb);
	data->inner_protocol = skb->inner_protocol;
	data->network_offset = orig_network_offset;
	if (skb_vlan_tag_present(skb))
		data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
	else
		data->vlan_tci = 0;
	data->vlan_proto = skb->vlan_proto;
	data->mac_proto = mac_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}

static void ovs_fragment(struct net *net, struct vport *vport,
			 struct sk_buff *skb, u16 mru,
			 struct sw_flow_key *key)
{
	u16 orig_network_offset = 0;

	if (eth_p_mpls(skb->protocol)) {
		orig_network_offset = skb_network_offset(skb);
		skb->network_header = skb->inner_network_header;
	}

	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb, orig_network_offset,
			     ovs_key_mac_proto(key));
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = skb->_skb_refdst;
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(key->eth.type), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
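
/* Transmit 'skb' on 'out_port', trimming it first when a truncate action
 * recorded a cutlen and falling back to ovs_fragment() when the packet is
 * larger than the MRU recorded for this flow.
 */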
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;
		u32 cutlen = OVS_CB(skb)->cutlen;

		if (unlikely(cutlen > 0)) {
			if (skb->len - cutlen > ovs_mac_header_len(key))
				pskb_trim(skb, skb->len - cutlen);
			else
				pskb_trim(skb, ovs_mac_header_len(key));
		}

		if (likely(!mru ||
			   (skb->len <= mru + vport->dev->hard_header_len))) {
			ovs_vport_send(vport, skb, ovs_key_mac_proto(key));
		} else if (mru <= vport->dev->mtu) {
			struct net *net = read_pnet(&dp->net);

			ovs_fragment(net, vport, skb, mru, key);
		} else {
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}
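
/* Build a dp_upcall_info from the OVS_ACTION_ATTR_USERSPACE attributes and
 * send the packet to userspace via ovs_dp_upcall().
 */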
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len,
			    uint32_t cutlen)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = dev_fill_metadata_dst(vport->dev, skb);
				if (!err)
					upcall.egress_tun_info = skb_tunnel_info(skb);
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall, cutlen);
}
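
/* Run the nested actions attached to a dec_ttl action when the TTL has
 * expired; with no nested actions the packet is simply consumed.
 */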
static int dec_ttl_exception_handler(struct datapath *dp, struct sk_buff *skb,
				     struct sw_flow_key *key,
				     const struct nlattr *attr, bool last)
{
	/* The first action is always 'OVS_DEC_TTL_ATTR_ARG'. */
	struct nlattr *dec_ttl_arg = nla_data(attr);
	int rem = nla_len(attr);

	if (nla_len(dec_ttl_arg)) {
		struct nlattr *actions = nla_next(dec_ttl_arg, &rem);

		if (actions)
			return clone_execute(dp, skb, key, 0, actions, rem,
					     last, false);
	}
	consume_skb(skb);
	return 0;
}

/* When 'last' is true, sample() should always consume the 'skb'.
 * Otherwise, sample() should keep 'skb' intact regardless of what
 * actions are executed within sample().
 */
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  bool last)
{
	struct nlattr *actions;
	struct nlattr *sample_arg;
	int rem = nla_len(attr);
	const struct sample_arg *arg;
	bool clone_flow_key;

	/* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */
	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if ((arg->probability != U32_MAX) &&
	    (!arg->probability || prandom_u32() > arg->probability)) {
		if (last)
			consume_skb(skb);
		return 0;
	}

	clone_flow_key = !arg->exec;
	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     clone_flow_key);
}

/* When 'last' is true, clone() should always consume the 'skb'.
 * Otherwise, clone() should keep 'skb' intact regardless of what
 * actions are executed within clone().
 */
static int clone(struct datapath *dp, struct sk_buff *skb,
		 struct sw_flow_key *key, const struct nlattr *attr,
		 bool last)
{
	struct nlattr *actions;
	struct nlattr *clone_arg;
	int rem = nla_len(attr);
	bool dont_clone_flow_key;

	/* The first action is always 'OVS_CLONE_ATTR_ARG'. */
	clone_arg = nla_data(attr);
	dont_clone_flow_key = nla_get_u32(clone_arg);
	actions = nla_next(clone_arg, &rem);

	return clone_execute(dp, skb, key, 0, actions, rem, last,
			     !dont_clone_flow_key);
}

static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		skb_dst_drop(skb);
		dst_hold((struct dst_entry *)tun->tun_dst);
		skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}

/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)

static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_NSH:
		err = set_nsh(skb, flow_key, a);
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4:
	case OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6:
		err = -EINVAL;
		break;
	}

	return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, bool last)
{
	u32 recirc_id;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	recirc_id = nla_get_u32(a);
	return clone_execute(dp, skb, key, recirc_id, NULL, 0, last, true);
}

static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
				 struct sw_flow_key *key,
				 const struct nlattr *attr, bool last)
{
	const struct nlattr *actions, *cpl_arg;
	const struct check_pkt_len_arg *arg;
	int rem = nla_len(attr);
	bool clone_flow_key;

	/* The first netlink attribute in 'attr' is always
	 * 'OVS_CHECK_PKT_LEN_ATTR_ARG'.
	 */
	cpl_arg = nla_data(attr);
	arg = nla_data(cpl_arg);

	if (skb->len <= arg->pkt_len) {
		/* Second netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
		 */
		actions = nla_next(cpl_arg, &rem);
		clone_flow_key = !arg->exec_for_lesser_equal;
	} else {
		/* Third netlink attribute in 'attr' is always
		 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER'.
		 */
		actions = nla_next(cpl_arg, &rem);
		actions = nla_next(actions, &rem);
		clone_flow_key = !arg->exec_for_greater;
	}

	return clone_execute(dp, skb, key, 0, nla_data(actions),
			     nla_len(actions), last, clone_flow_key);
}

static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *nh;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ipv6_hdr(skb);

		if (nh->hop_limit <= 1)
			return -EHOSTUNREACH;

		key->ip.ttl = --nh->hop_limit;
	} else {
		struct iphdr *nh;
		u8 old_ttl;

		err = skb_ensure_writable(skb, skb_network_offset(skb) +
					  sizeof(*nh));
		if (unlikely(err))
			return err;

		nh = ip_hdr(skb);
		if (nh->ttl <= 1)
			return -EHOSTUNREACH;

		old_ttl = nh->ttl--;
		csum_replace2(&nh->check, htons(old_ttl << 8),
			      htons(nh->ttl << 8));
		key->ip.ttl = nh->ttl;
	}
	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT: {
			int port = nla_get_u32(a);
			struct sk_buff *clone;

			/* Every output action needs a separate clone
			 * of 'skb'.  In case the output action is the
			 * last action, cloning can be avoided.
			 */
			if (nla_is_last(a, rem)) {
				do_output(dp, skb, port, key);
				/* 'skb' has been used for output. */
				return 0;
			}

			clone = skb_clone(skb, GFP_ATOMIC);
			if (clone)
				do_output(dp, clone, port, key);
			OVS_CB(skb)->cutlen = 0;
			break;
		}

		case OVS_ACTION_ATTR_TRUNC: {
			struct ovs_action_trunc *trunc = nla_data(a);

			if (skb->len > trunc->max_len)
				OVS_CB(skb)->cutlen = skb->len - trunc->max_len;
			break;
		}

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr,
					 len, OVS_CB(skb)->cutlen);
			OVS_CB(skb)->cutlen = 0;
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			struct ovs_action_push_mpls *mpls = nla_data(a);

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, skb->mac_len);
			break;
		}
		case OVS_ACTION_ATTR_ADD_MPLS: {
			struct ovs_action_add_mpls *mpls = nla_data(a);
			__u16 mac_len = 0;

			if (mpls->tun_flags & OVS_MPLS_L3_TUNNEL_FLAG_MASK)
				mac_len = skb->mac_len;

			err = push_mpls(skb, key, mpls->mpls_lse,
					mpls->mpls_ethertype, mac_len);
			break;
		}
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			bool last = nla_is_last(a, rem);

			err = execute_recirc(dp, skb, key, a, last);
			if (last) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = sample(dp, skb, key, a, last);
			if (last)
				return err;
			break;
		}

		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			err = ovs_ct_clear(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			err = push_eth(skb, key, nla_data(a));
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			err = pop_eth(skb, key);
			break;

		case OVS_ACTION_ATTR_PUSH_NSH: {
			u8 buffer[NSH_HDR_MAX_LEN];
			struct nshhdr *nh = (struct nshhdr *)buffer;

			err = nsh_hdr_from_nlattr(nla_data(a), nh,
						  NSH_HDR_MAX_LEN);
			if (unlikely(err))
				break;
			err = push_nsh(skb, key, nh);
			break;
		}

		case OVS_ACTION_ATTR_POP_NSH:
			err = pop_nsh(skb, key);
			break;

		case OVS_ACTION_ATTR_METER:
			if (ovs_meter_execute(dp, skb, key, nla_get_u32(a))) {
				consume_skb(skb);
				return 0;
			}
			break;

		case OVS_ACTION_ATTR_CLONE: {
			bool last = nla_is_last(a, rem);

			err = clone(dp, skb, key, a, last);
			if (last)
				return err;
			break;
		}

		case OVS_ACTION_ATTR_CHECK_PKT_LEN: {
			bool last = nla_is_last(a, rem);

			err = execute_check_pkt_len(dp, skb, key, a, last);
			if (last)
				return err;
			break;
		}

		case OVS_ACTION_ATTR_DEC_TTL:
			err = execute_dec_ttl(skb, key);
			if (err == -EHOSTUNREACH) {
				err = dec_ttl_exception_handler(dp, skb, key,
								a, true);
				return err;
			}
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	consume_skb(skb);
	return 0;
}

/* Execute the actions on the clone of the packet. The effect of the
 * execution does not affect the original 'skb' nor the original 'key'.
 *
 * The execution may be deferred in case the actions can not be executed
 * immediately.
 */
static int clone_execute(struct datapath *dp, struct sk_buff *skb,
			 struct sw_flow_key *key, u32 recirc_id,
			 const struct nlattr *actions, int len,
			 bool last, bool clone_flow_key)
{
	struct deferred_action *da;
	struct sw_flow_key *clone;

	skb = last ? skb : skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		/* Out of memory, skip this action. */
		return 0;
	}

	/* When clone_flow_key is false, the 'key' will not be changed
	 * by the actions, then the 'key' can be used directly.
	 * Otherwise, try to clone key from the next recursion level of
	 * 'flow_keys'. If clone is successful, execute the actions
	 * without deferring.
	 */
	clone = clone_flow_key ? clone_key(key) : key;
	if (clone) {
		int err = 0;

		if (actions) { /* Sample action */
			if (clone_flow_key)
				__this_cpu_inc(exec_actions_level);

			err = do_execute_actions(dp, skb, clone,
						 actions, len);

			if (clone_flow_key)
				__this_cpu_dec(exec_actions_level);
		} else { /* Recirc action */
			clone->recirc_id = recirc_id;
			ovs_dp_process_packet(skb, clone);
		}
		return err;
	}

	/* Out of 'flow_keys' space. Defer actions */
	da = add_deferred_actions(skb, key, actions, len);
	if (da) {
		if (!actions) { /* Recirc action */
			key = &da->pkt_key;
			key->recirc_id = recirc_id;
		}
	} else {
		/* Out of per CPU action FIFO space. Drop the 'skb' and
		 * log an error.
		 */
		kfree_skb(skb);

		if (net_ratelimit()) {
			if (actions) { /* Sample action */
				pr_warn("%s: deferred action limit reached, drop sample action\n",
					ovs_dp_name(dp));
			} else { /* Recirc action */
				pr_warn("%s: deferred action limit reached, drop recirc action\n",
					ovs_dp_name(dp));
			}
		}
	}
	return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO in case there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;
		int actions_len = da->actions_len;

		if (actions)
			do_execute_actions(dp, skb, key, actions, actions_len);
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int err, level;

	level = __this_cpu_inc_return(exec_actions_level);
	if (unlikely(level > OVS_RECURSION_LIMIT)) {
		net_crit_ratelimited("ovs: recursion limit reached on datapath %s, probable configuration error\n",
				     ovs_dp_name(dp));
		kfree_skb(skb);
		err = -ENETDOWN;
		goto out;
	}

	OVS_CB(skb)->acts_origlen = acts->orig_len;
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (level == 1)
		process_deferred_actions(dp);

out:
	__this_cpu_dec(exec_actions_level);
	return err;
}

int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	flow_keys = alloc_percpu(struct action_flow_keys);
	if (!flow_keys) {
		free_percpu(action_fifos);
		return -ENOMEM;
	}

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
	free_percpu(flow_keys);
}