/*
 * Copyright (c) 2007-2017 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/mpls.h>
#include <net/vxlan.h>
#include <net/tun_proto.h>

#include "flow_netlink.h"
struct ovs_len_tbl {
	int len;
	const struct ovs_len_tbl *next;
};

#define OVS_ATTR_NESTED -1
#define OVS_ATTR_VARIABLE -2
static bool actions_may_change_flow(const struct nlattr *actions)
{
	struct nlattr *nla;
	int rem;

	nla_for_each_nested(nla, actions, rem) {
		u16 action = nla_type(nla);

		switch (action) {
		case OVS_ACTION_ATTR_OUTPUT:
		case OVS_ACTION_ATTR_RECIRC:
		case OVS_ACTION_ATTR_TRUNC:
		case OVS_ACTION_ATTR_USERSPACE:
			break;

		case OVS_ACTION_ATTR_CT:
		case OVS_ACTION_ATTR_CT_CLEAR:
		case OVS_ACTION_ATTR_HASH:
		case OVS_ACTION_ATTR_POP_ETH:
		case OVS_ACTION_ATTR_POP_MPLS:
		case OVS_ACTION_ATTR_POP_NSH:
		case OVS_ACTION_ATTR_POP_VLAN:
		case OVS_ACTION_ATTR_PUSH_ETH:
		case OVS_ACTION_ATTR_PUSH_MPLS:
		case OVS_ACTION_ATTR_PUSH_NSH:
		case OVS_ACTION_ATTR_PUSH_VLAN:
		case OVS_ACTION_ATTR_SAMPLE:
		case OVS_ACTION_ATTR_SET:
		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_METER:
		default:
			return true;
		}
	}
	return false;
}
static void update_range(struct sw_flow_match *match,
			 size_t offset, size_t size, bool is_mask)
{
	struct sw_flow_key_range *range;
	size_t start = rounddown(offset, sizeof(long));
	size_t end = roundup(offset + size, sizeof(long));

	if (!is_mask)
		range = &match->range;
	else
		range = &match->mask->range;

	if (range->start == range->end) {
		range->start = start;
		range->end = end;
		return;
	}

	if (range->start > start)
		range->start = start;

	if (range->end < end)
		range->end = end;
}
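/* Worked example (illustrative, not from the original source): with
 * sizeof(long) == 8, update_range(match, 13, 2, false) rounds the touched
 * bytes out to the enclosing long words, so match->range grows to cover
 * bytes [8, 16).  Keeping the range long-aligned lets the flow-table code
 * compare and hash masked keys one long at a time.
 */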
#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
	do { \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			(match)->mask->key.field = value;		    \
		else							    \
			(match)->key->field = value;			    \
	} while (0)

#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask)	    \
	do {								    \
		update_range(match, offset, len, is_mask);		    \
		if (is_mask)						    \
			memcpy((u8 *)&(match)->mask->key + offset, value_p, \
			       len);					    \
		else							    \
			memcpy((u8 *)(match)->key + offset, value_p, len);  \
	} while (0)

#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask)		      \
	SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
				  value_p, len, is_mask)

#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask)		    \
	do {								    \
		update_range(match, offsetof(struct sw_flow_key, field),    \
			     sizeof((match)->key->field), is_mask);	    \
		if (is_mask)						    \
			memset((u8 *)&(match)->mask->key.field, value,	    \
			       sizeof((match)->mask->key.field));	    \
		else							    \
			memset((u8 *)&(match)->key->field, value,	    \
			       sizeof((match)->key->field));		    \
	} while (0)
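/* Illustrative use of the helpers above (hypothetical calls, not part of
 * this file): writing one field of the key or of the mask, depending on
 * 'is_mask', while update_range() records which bytes were touched:
 *
 *	SW_FLOW_KEY_PUT(match, ip.proto, IPPROTO_TCP, false);
 *	SW_FLOW_KEY_PUT(match, ip.proto, 0xff, true);
 *
 * The first call sets match->key->ip.proto, the second the corresponding
 * mask byte, and both extend the relevant sw_flow_key_range.
 */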
static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs, bool log)
{
	u64 key_expected = 0;
	u64 mask_allowed = key_attrs;  /* At most allow all key attributes */

	/* The following mask attributes are allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND)
			| (1 << OVS_KEY_ATTR_MPLS)
			| (1 << OVS_KEY_ATTR_NSH));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (eth_p_mpls(match->key->eth.type)) {
		key_expected |= 1 << OVS_KEY_ATTR_MPLS;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
			mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
			mask_allowed |= 1 << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6;
		}

		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				if (match->key->tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					/* Original direction conntrack tuple
					 * uses the same space as the ND fields
					 * in the key, so both are not allowed
					 * at the same time.
					 */
					mask_allowed &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
					if (match->mask && (match->mask->key.tp.src == htons(0xff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_NSH)) {
		key_expected |= 1 << OVS_KEY_ATTR_NSH;
		if (match->mask &&
		    match->mask->key.eth.type == htons(0xffff)) {
			mask_allowed |= 1 << OVS_KEY_ATTR_NSH;
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
			  (unsigned long long)key_attrs,
			  (unsigned long long)key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
			  (unsigned long long)mask_attrs,
			  (unsigned long long)mask_allowed);
		return false;
	}

	return true;
}
size_t ovs_tun_key_attr_size(void)
{
	/* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return    nla_total_size_64bit(8) /* OVS_TUNNEL_KEY_ATTR_ID */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
		+ nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
		+ nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_CSUM */
		+ nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_OAM */
		+ nla_total_size(256)  /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
		/* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
		 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
		 */
		+ nla_total_size(2)    /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
		+ nla_total_size(2);   /* OVS_TUNNEL_KEY_ATTR_TP_DST */
}
static size_t ovs_nsh_key_attr_size(void)
{
	/* Whenever adding new OVS_NSH_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	return  nla_total_size(NSH_BASE_HDR_LEN) /* OVS_NSH_KEY_ATTR_BASE */
		/* OVS_NSH_KEY_ATTR_MD1 and OVS_NSH_KEY_ATTR_MD2 are
		 * mutually exclusive, so the bigger one can cover
		 * the small one.
		 */
		+ nla_total_size(NSH_CTX_HDRS_MAX_LEN);
}
size_t ovs_key_attr_size(void)
{
	/* Whenever adding new OVS_KEY_ FIELDS, we should consider
	 * updating this function.
	 */
	BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 29);

	return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
		  + ovs_tun_key_attr_size()
		+ nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
		+ nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6 */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_NSH */
		  + ovs_nsh_key_attr_size()
		+ nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
		+ nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
		+ nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
		+ nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
		+ nla_total_size(28); /* OVS_KEY_ATTR_ND */
}
static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
	[OVS_VXLAN_EXT_GBP]	= { .len = sizeof(u32) },
};
static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
	[OVS_TUNNEL_KEY_ATTR_ID]	    = { .len = sizeof(u64) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_SRC]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_IPV4_DST]	    = { .len = sizeof(u32) },
	[OVS_TUNNEL_KEY_ATTR_TOS]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_TTL]	    = { .len = 1 },
	[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_CSUM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_TP_SRC]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_TP_DST]	    = { .len = sizeof(u16) },
	[OVS_TUNNEL_KEY_ATTR_OAM]	    = { .len = 0 },
	[OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
	[OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
						.next = ovs_vxlan_ext_key_lens },
	[OVS_TUNNEL_KEY_ATTR_IPV6_SRC]	    = { .len = sizeof(struct in6_addr) },
	[OVS_TUNNEL_KEY_ATTR_IPV6_DST]	    = { .len = sizeof(struct in6_addr) },
};
static const struct ovs_len_tbl
ovs_nsh_key_attr_lens[OVS_NSH_KEY_ATTR_MAX + 1] = {
	[OVS_NSH_KEY_ATTR_BASE]	= { .len = sizeof(struct ovs_nsh_key_base) },
	[OVS_NSH_KEY_ATTR_MD1]	= { .len = sizeof(struct ovs_nsh_key_md1) },
	[OVS_NSH_KEY_ATTR_MD2]	= { .len = OVS_ATTR_VARIABLE },
};
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP]	 = { .len = OVS_ATTR_NESTED },
	[OVS_KEY_ATTR_PRIORITY]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_IN_PORT]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_SKB_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_ETHERNET]	 = { .len = sizeof(struct ovs_key_ethernet) },
	[OVS_KEY_ATTR_VLAN]	 = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_IPV4]	 = { .len = sizeof(struct ovs_key_ipv4) },
	[OVS_KEY_ATTR_IPV6]	 = { .len = sizeof(struct ovs_key_ipv6) },
	[OVS_KEY_ATTR_TCP]	 = { .len = sizeof(struct ovs_key_tcp) },
	[OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
	[OVS_KEY_ATTR_UDP]	 = { .len = sizeof(struct ovs_key_udp) },
	[OVS_KEY_ATTR_SCTP]	 = { .len = sizeof(struct ovs_key_sctp) },
	[OVS_KEY_ATTR_ICMP]	 = { .len = sizeof(struct ovs_key_icmp) },
	[OVS_KEY_ATTR_ICMPV6]	 = { .len = sizeof(struct ovs_key_icmpv6) },
	[OVS_KEY_ATTR_ARP]	 = { .len = sizeof(struct ovs_key_arp) },
	[OVS_KEY_ATTR_ND]	 = { .len = sizeof(struct ovs_key_nd) },
	[OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_DP_HASH]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_TUNNEL]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_tunnel_key_lens, },
	[OVS_KEY_ATTR_MPLS]	 = { .len = sizeof(struct ovs_key_mpls) },
	[OVS_KEY_ATTR_CT_STATE]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_ZONE]	 = { .len = sizeof(u16) },
	[OVS_KEY_ATTR_CT_MARK]	 = { .len = sizeof(u32) },
	[OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv4) },
	[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6] = {
		.len = sizeof(struct ovs_key_ct_tuple_ipv6) },
	[OVS_KEY_ATTR_NSH]	 = { .len = OVS_ATTR_NESTED,
				     .next = ovs_nsh_key_attr_lens, },
};
static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
{
	return expected_len == attr_len ||
	       expected_len == OVS_ATTR_NESTED ||
	       expected_len == OVS_ATTR_VARIABLE;
}
static bool is_all_zero(const u8 *fp, size_t size)
{
	int i;

	if (!fp)
		return false;

	for (i = 0; i < size; i++)
		if (fp[i])
			return false;

	return true;
}
static int __parse_flow_nlattrs(const struct nlattr *attr,
				const struct nlattr *a[],
				u64 *attrsp, bool log, bool nz)
{
	const struct nlattr *nla;
	u64 attrs;
	int rem;

	attrs = *attrsp;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Key type %d is out of range max %d",
				  type, OVS_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (attrs & (1 << type)) {
			OVS_NLERR(log, "Duplicate key (type %d).", type);
			return -EINVAL;
		}

		expected_len = ovs_key_lens[type].len;
		if (!check_attr_len(nla_len(nla), expected_len)) {
			OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
				  type, nla_len(nla), expected_len);
			return -EINVAL;
		}

		if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
			attrs |= 1 << type;
			a[type] = nla;
		}
	}
	if (rem) {
		OVS_NLERR(log, "Message has %d unknown bytes.", rem);
		return -EINVAL;
	}

	*attrsp = attrs;
	return 0;
}
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp,
				   bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, true);
}
int parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[],
		       u64 *attrsp, bool log)
{
	return __parse_flow_nlattrs(attr, a, attrsp, log, false);
}
static int genev_tun_opt_from_nlattr(const struct nlattr *a,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	unsigned long opt_key_offset;

	if (nla_len(a) > sizeof(match->key->tun_opts)) {
		OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
			  nla_len(a), sizeof(match->key->tun_opts));
		return -EINVAL;
	}

	if (nla_len(a) % 4 != 0) {
		OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
			  nla_len(a));
		return -EINVAL;
	}

	/* We need to record the length of the options passed
	 * down, otherwise packets with the same format but
	 * additional options will be silently matched.
	 */
	if (!is_mask) {
		SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
				false);
	} else {
		/* This is somewhat unusual because it looks at
		 * both the key and mask while parsing the
		 * attributes (and by extension assumes the key
		 * is parsed first). Normally, we would verify
		 * that each is the correct length and that the
		 * attributes line up in the validate function.
		 * However, that is difficult because this is
		 * variable length and we won't have the
		 * information later.
		 */
		if (match->key->tun_opts_len != nla_len(a)) {
			OVS_NLERR(log, "Geneve option len %d != mask len %d",
				  match->key->tun_opts_len, nla_len(a));
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
	}

	opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
				  nla_len(a), is_mask);
	return 0;
}
static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
				     struct sw_flow_match *match, bool is_mask,
				     bool log)
{
	struct nlattr *a;
	int rem;
	unsigned long opt_key_offset;
	struct vxlan_metadata opts;

	BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));

	memset(&opts, 0, sizeof(opts));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		if (type > OVS_VXLAN_EXT_MAX) {
			OVS_NLERR(log, "VXLAN extension %d out of range max %d",
				  type, OVS_VXLAN_EXT_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_vxlan_ext_key_lens[type].len)) {
			OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
				  type, nla_len(a),
				  ovs_vxlan_ext_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_VXLAN_EXT_GBP:
			opts.gbp = nla_get_u32(a);
			break;
		default:
			OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
				  type);
			return -EINVAL;
		}
	}
	if (rem) {
		OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (!is_mask)
		SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
	else
		SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);

	opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
	SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
				  is_mask);
	return 0;
}
static int ip_tun_from_nlattr(const struct nlattr *attr,
			      struct sw_flow_match *match, bool is_mask,
			      bool log)
{
	bool ttl = false, ipv4 = false, ipv6 = false;
	__be16 tun_flags = 0;
	int opts_type = 0;
	struct nlattr *a;
	int rem;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int err;

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR(log, "Tunnel attr %d out of range max %d",
				  type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_tunnel_key_lens[type].len)) {
			OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
				  type, nla_len(a), ovs_tunnel_key_lens[type].len);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
					nla_get_in_addr(a), is_mask);
			ipv4 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
			SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
					nla_get_in6_addr(a), is_mask);
			ipv6 = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.tp_src,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TP_DST:
			SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
					nla_get_be16(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_OAM:
			tun_flags |= TUNNEL_OAM;
			break;
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_GENEVE_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			if (opts_type) {
				OVS_NLERR(log, "Multiple metadata blocks provided");
				return -EINVAL;
			}

			err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
			if (err)
				return err;

			tun_flags |= TUNNEL_VXLAN_OPT;
			opts_type = type;
			break;
		case OVS_TUNNEL_KEY_ATTR_PAD:
			break;
		default:
			OVS_NLERR(log, "Unknown IP tunnel attribute %d",
				  type);
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
	if (is_mask)
		SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
	else
		SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
				false);

	if (rem > 0) {
		OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
			  rem);
		return -EINVAL;
	}

	if (ipv4 && ipv6) {
		OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
		return -EINVAL;
	}

	if (!is_mask) {
		if (!ipv4 && !ipv6) {
			OVS_NLERR(log, "IP tunnel dst address not specified");
			return -EINVAL;
		}
		if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
			OVS_NLERR(log, "IPv4 tunnel dst address is zero");
			return -EINVAL;
		}
		if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
			OVS_NLERR(log, "IPv6 tunnel dst address is zero");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR(log, "IP tunnel TTL not specified.");
			return -EINVAL;
		}
	}

	return opts_type;
}
static int vxlan_opt_to_nlattr(struct sk_buff *skb,
			       const void *tun_opts, int swkey_tun_opts_len)
{
	const struct vxlan_metadata *opts = tun_opts;
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
static int __ip_tun_to_nlattr(struct sk_buff *skb,
			      const struct ip_tunnel_key *output,
			      const void *tun_opts, int swkey_tun_opts_len,
			      unsigned short tun_proto)
{
	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id,
			 OVS_TUNNEL_KEY_ATTR_PAD))
		return -EMSGSIZE;
	switch (tun_proto) {
	case AF_INET:
		if (output->u.ipv4.src &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
				    output->u.ipv4.src))
			return -EMSGSIZE;
		if (output->u.ipv4.dst &&
		    nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
				    output->u.ipv4.dst))
			return -EMSGSIZE;
		break;
	case AF_INET6:
		if (!ipv6_addr_any(&output->u.ipv6.src) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
				     &output->u.ipv6.src))
			return -EMSGSIZE;
		if (!ipv6_addr_any(&output->u.ipv6.dst) &&
		    nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
				     &output->u.ipv6.dst))
			return -EMSGSIZE;
		break;
	}
	if (output->tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;
	if (output->tp_src &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
		return -EMSGSIZE;
	if (output->tp_dst &&
	    nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_OAM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
		return -EMSGSIZE;
	if (swkey_tun_opts_len) {
		if (output->tun_flags & TUNNEL_GENEVE_OPT &&
		    nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
			    swkey_tun_opts_len, tun_opts))
			return -EMSGSIZE;
		else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
			 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
			return -EMSGSIZE;
	}

	return 0;
}
static int ip_tun_to_nlattr(struct sk_buff *skb,
			    const struct ip_tunnel_key *output,
			    const void *tun_opts, int swkey_tun_opts_len,
			    unsigned short tun_proto)
{
	struct nlattr *nla;
	int err;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
				 tun_proto);
	if (err)
		return err;

	nla_nest_end(skb, nla);
	return 0;
}
int ovs_nla_put_tunnel_info(struct sk_buff *skb,
			    struct ip_tunnel_info *tun_info)
{
	return __ip_tun_to_nlattr(skb, &tun_info->key,
				  ip_tunnel_info_opts(tun_info),
				  tun_info->options_len,
				  ip_tunnel_info_af(tun_info));
}
static int encode_vlan_from_nlattrs(struct sw_flow_match *match,
				    const struct nlattr *a[],
				    bool is_mask, bool inner)
{
	__be16 tci = 0;
	__be16 tpid = 0;

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (likely(!inner)) {
		SW_FLOW_KEY_PUT(match, eth.vlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.vlan.tci, tci, is_mask);
	} else {
		SW_FLOW_KEY_PUT(match, eth.cvlan.tpid, tpid, is_mask);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, tci, is_mask);
	}
	return 0;
}
static int validate_vlan_from_nlattrs(const struct sw_flow_match *match,
				      u64 key_attrs, bool inner,
				      const struct nlattr **a, bool log)
{
	__be16 tci = 0;

	if (!((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	      eth_type_vlan(nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE])))) {
		/* Not a VLAN. */
		return 0;
	}

	if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
	      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
		OVS_NLERR(log, "Invalid %s frame", (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		if (tci) {
			OVS_NLERR(log, "%s TCI does not have VLAN_TAG_PRESENT bit set.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		} else if (nla_len(a[OVS_KEY_ATTR_ENCAP])) {
			/* Corner case for truncated VLAN header. */
			OVS_NLERR(log, "Truncated %s header has non-zero encap attribute.",
				  (inner) ? "C-VLAN" : "VLAN");
			return -EINVAL;
		}
	}

	return 1;
}
static int validate_vlan_mask_from_nlattrs(const struct sw_flow_match *match,
					   u64 key_attrs, bool inner,
					   const struct nlattr **a, bool log)
{
	__be16 tci = 0;
	__be16 tpid = 0;
	bool encap_valid = !!(match->key->eth.vlan.tci &
			      htons(VLAN_TAG_PRESENT));
	bool i_encap_valid = !!(match->key->eth.cvlan.tci &
				htons(VLAN_TAG_PRESENT));

	if (!(key_attrs & (1 << OVS_KEY_ATTR_ENCAP))) {
		/* Not a VLAN. */
		return 0;
	}

	if ((!inner && !encap_valid) || (inner && !i_encap_valid)) {
		OVS_NLERR(log, "Encap mask attribute is set for non-%s frame.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	if (a[OVS_KEY_ATTR_VLAN])
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

	if (a[OVS_KEY_ATTR_ETHERTYPE])
		tpid = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

	if (tpid != htons(0xffff)) {
		OVS_NLERR(log, "Must have an exact match on %s TPID (mask=%x).",
			  (inner) ? "C-VLAN" : "VLAN", ntohs(tpid));
		return -EINVAL;
	}
	if (!(tci & htons(VLAN_TAG_PRESENT))) {
		OVS_NLERR(log, "%s TCI mask does not have exact match for VLAN_TAG_PRESENT bit.",
			  (inner) ? "C-VLAN" : "VLAN");
		return -EINVAL;
	}

	return 1;
}
static int __parse_vlan_from_nlattrs(struct sw_flow_match *match,
				     u64 *key_attrs, bool inner,
				     const struct nlattr **a, bool is_mask,
				     bool log)
{
	int err;
	const struct nlattr *encap;

	if (!is_mask)
		err = validate_vlan_from_nlattrs(match, *key_attrs, inner,
						 a, log);
	else
		err = validate_vlan_mask_from_nlattrs(match, *key_attrs, inner,
						      a, log);
	if (err <= 0)
		return err;

	err = encode_vlan_from_nlattrs(match, a, is_mask, inner);
	if (err)
		return err;

	*key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	*key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);

	encap = a[OVS_KEY_ATTR_ENCAP];

	if (!is_mask)
		err = parse_flow_nlattrs(encap, a, key_attrs, log);
	else
		err = parse_flow_mask_nlattrs(encap, a, key_attrs, log);

	return err;
}
static int parse_vlan_from_nlattrs(struct sw_flow_match *match,
				   u64 *key_attrs, const struct nlattr **a,
				   bool is_mask, bool log)
{
	int err;
	bool encap_valid = false;

	err = __parse_vlan_from_nlattrs(match, key_attrs, false, a,
					is_mask, log);
	if (err)
		return err;

	encap_valid = !!(match->key->eth.vlan.tci & htons(VLAN_TAG_PRESENT));
	if (encap_valid) {
		err = __parse_vlan_from_nlattrs(match, key_attrs, true, a,
						is_mask, log);
		if (err)
			return err;
	}

	return 0;
}
*match
,
1062 u64
*attrs
, const struct nlattr
**a
,
1063 bool is_mask
, bool log
)
1067 eth_type
= nla_get_be16(a
[OVS_KEY_ATTR_ETHERTYPE
]);
1069 /* Always exact match EtherType. */
1070 eth_type
= htons(0xffff);
1071 } else if (!eth_proto_is_802_3(eth_type
)) {
1072 OVS_NLERR(log
, "EtherType %x is less than min %x",
1073 ntohs(eth_type
), ETH_P_802_3_MIN
);
1077 SW_FLOW_KEY_PUT(match
, eth
.type
, eth_type
, is_mask
);
1078 *attrs
&= ~(1 << OVS_KEY_ATTR_ETHERTYPE
);
static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
				 u64 *attrs, const struct nlattr **a,
				 bool is_mask, bool log)
{
	u8 mac_proto = MAC_PROTO_ETHERNET;

	if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
		u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);

		SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
		u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);

		SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask) {
			in_port = 0xffffffff; /* Always exact match in_port. */
		} else if (in_port >= DP_MAX_PORTS) {
			OVS_NLERR(log, "Port %d exceeds max allowable %d",
				  in_port, DP_MAX_PORTS);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
				       is_mask, log) < 0)
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
		u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);

		if (ct_state & ~CT_SUPPORTED_MASK) {
			OVS_NLERR(log, "ct_state flags %08x unsupported",
				  ct_state);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ct_state, ct_state, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
		u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);

		SW_FLOW_KEY_PUT(match, ct_zone, ct_zone, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
		u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);

		SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
	    ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
		const struct ovs_key_ct_labels *cl;

		cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
		SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
				   sizeof(*cl), is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4)) {
		const struct ovs_key_ct_tuple_ipv4 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4]);

		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.src, ct->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.ct_orig.dst, ct->ipv4_dst, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv4_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4);
	}
	if (*attrs & (1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6)) {
		const struct ovs_key_ct_tuple_ipv6 *ct;

		ct = nla_data(a[OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6]);

		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.src, &ct->ipv6_src,
				   sizeof(match->key->ipv6.ct_orig.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.ct_orig.dst, &ct->ipv6_dst,
				   sizeof(match->key->ipv6.ct_orig.dst),
				   is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.src, ct->src_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct.orig_tp.dst, ct->dst_port, is_mask);
		SW_FLOW_KEY_PUT(match, ct_orig_proto, ct->ipv6_proto, is_mask);
		*attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6);
	}

	/* For layer 3 packets the Ethernet type is provided
	 * and treated as metadata but no MAC addresses are provided.
	 */
	if (!(*attrs & (1ULL << OVS_KEY_ATTR_ETHERNET)) &&
	    (*attrs & (1ULL << OVS_KEY_ATTR_ETHERTYPE)))
		mac_proto = MAC_PROTO_NONE;

	/* Always exact match mac_proto */
	SW_FLOW_KEY_PUT(match, mac_proto, is_mask ? 0xff : mac_proto, is_mask);

	if (mac_proto == MAC_PROTO_NONE)
		return parse_eth_type_from_nlattrs(match, attrs, a, is_mask,
						   log);

	return 0;
}
int nsh_hdr_from_nlattr(const struct nlattr *attr,
			struct nshhdr *nh, size_t size)
{
	struct nlattr *a;
	int rem;
	u8 flags = 0;
	u8 ttl = 0;
	int mdlen = 0;

	/* validate_nsh() has checked this already, so we needn't duplicate
	 * the check here.
	 */
	if (size < NSH_BASE_HDR_LEN)
		return -ENOBUFS;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);

			flags = base->flags;
			ttl = base->ttl;
			nh->np = base->np;
			nh->mdtype = base->mdtype;
			nh->path_hdr = base->path_hdr;
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1:
			mdlen = nla_len(a);
			if (mdlen > size - NSH_BASE_HDR_LEN)
				return -ENOBUFS;
			memcpy(&nh->md1, nla_data(a), mdlen);
			break;

		case OVS_NSH_KEY_ATTR_MD2:
			mdlen = nla_len(a);
			if (mdlen > size - NSH_BASE_HDR_LEN)
				return -ENOBUFS;
			memcpy(&nh->md2, nla_data(a), mdlen);
			break;

		default:
			return -EINVAL;
		}
	}

	/* nsh header length = NSH_BASE_HDR_LEN + mdlen */
	nh->ver_flags_ttl_len = 0;
	nsh_set_flags_ttl_len(nh, flags, ttl, NSH_BASE_HDR_LEN + mdlen);

	return 0;
}
int nsh_key_from_nlattr(const struct nlattr *attr,
			struct ovs_key_nsh *nsh, struct ovs_key_nsh *nsh_mask)
{
	struct nlattr *a;
	int rem;

	/* validate_nsh() has checked this already, so we needn't duplicate
	 * the check here.
	 */
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);
			const struct ovs_nsh_key_base *base_mask = base + 1;

			nsh->base = *base;
			nsh_mask->base = *base_mask;
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1: {
			const struct ovs_nsh_key_md1 *md1 = nla_data(a);
			const struct ovs_nsh_key_md1 *md1_mask = md1 + 1;

			memcpy(nsh->context, md1->context, sizeof(*md1));
			memcpy(nsh_mask->context, md1_mask->context,
			       sizeof(*md1_mask));
			break;
		}
		case OVS_NSH_KEY_ATTR_MD2:
			/* Not supported yet */
			return -ENOTSUPP;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
static int nsh_key_put_from_nlattr(const struct nlattr *attr,
				   struct sw_flow_match *match, bool is_mask,
				   bool is_push_nsh, bool log)
{
	struct nlattr *a;
	int rem;
	bool has_base = false;
	bool has_md1 = false;
	bool has_md2 = false;
	u8 mdtype = 0;
	int mdlen = 0;

	if (WARN_ON(is_push_nsh && is_mask))
		return -EINVAL;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		int i;

		if (type > OVS_NSH_KEY_ATTR_MAX) {
			OVS_NLERR(log, "nsh attr %d is out of range max %d",
				  type, OVS_NSH_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (!check_attr_len(nla_len(a),
				    ovs_nsh_key_attr_lens[type].len)) {
			OVS_NLERR(
			    log,
			    "nsh attr %d has unexpected len %d expected %d",
			    type,
			    nla_len(a),
			    ovs_nsh_key_attr_lens[type].len
			);
			return -EINVAL;
		}

		switch (type) {
		case OVS_NSH_KEY_ATTR_BASE: {
			const struct ovs_nsh_key_base *base = nla_data(a);

			has_base = true;
			mdtype = base->mdtype;
			SW_FLOW_KEY_PUT(match, nsh.base.flags,
					base->flags, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.ttl,
					base->ttl, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.mdtype,
					base->mdtype, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.np,
					base->np, is_mask);
			SW_FLOW_KEY_PUT(match, nsh.base.path_hdr,
					base->path_hdr, is_mask);
			break;
		}
		case OVS_NSH_KEY_ATTR_MD1: {
			const struct ovs_nsh_key_md1 *md1 = nla_data(a);

			has_md1 = true;
			for (i = 0; i < NSH_MD1_CONTEXT_SIZE; i++)
				SW_FLOW_KEY_PUT(match, nsh.context[i],
						md1->context[i], is_mask);
			break;
		}
		case OVS_NSH_KEY_ATTR_MD2:
			if (!is_push_nsh) /* Not supported MD type 2 yet */
				return -ENOTSUPP;

			has_md2 = true;
			mdlen = nla_len(a);
			if (mdlen > NSH_CTX_HDRS_MAX_LEN || mdlen <= 0) {
				OVS_NLERR(
				    log,
				    "Invalid MD length %d for MD type %d",
				    mdlen,
				    mdtype
				);
				return -EINVAL;
			}
			break;
		default:
			OVS_NLERR(log, "Unknown nsh attribute %d",
				  type);
			return -EINVAL;
		}
	}

	if (rem > 0) {
		OVS_NLERR(log, "nsh attribute has %d unknown bytes.", rem);
		return -EINVAL;
	}

	if (has_md1 && has_md2) {
		OVS_NLERR(
		    1,
		    "invalid nsh attribute: md1 and md2 are exclusive."
		);
		return -EINVAL;
	}

	if (!is_mask) {
		if ((has_md1 && mdtype != NSH_M_TYPE1) ||
		    (has_md2 && mdtype != NSH_M_TYPE2)) {
			OVS_NLERR(1, "nsh attribute has unmatched MD type %d.",
				  mdtype);
			return -EINVAL;
		}

		if (is_push_nsh &&
		    (!has_base || (!has_md1 && !has_md2))) {
			OVS_NLERR(
			    1,
			    "push_nsh: missing base or metadata attributes"
			);
			return -EINVAL;
		}
	}

	return 0;
}
static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
				u64 attrs, const struct nlattr **a,
				bool is_mask, bool log)
{
	int err;

	err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				   eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				   eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

		if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
			/* VLAN attribute is always parsed before getting here since it
			 * may occur multiple times.
			 */
			OVS_NLERR(log, "VLAN attribute unexpected.");
			return -EINVAL;
		}

		if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
			err = parse_eth_type_from_nlattrs(match, &attrs, a, is_mask,
							  log);
			if (err)
				return err;
		} else if (!is_mask) {
			SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
		}
	} else if (!match->key->eth.type) {
		OVS_NLERR(log, "Either Ethernet header or EtherType is required.");
		return -EINVAL;
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
				  ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
				  ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}

		if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
			OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)",
				  ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				   ipv6_key->ipv6_src,
				   sizeof(match->key->ipv6.addr.src),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				   ipv6_key->ipv6_dst,
				   sizeof(match->key->ipv6.addr.dst),
				   is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				   arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				   arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_NSH)) {
		if (nsh_key_put_from_nlattr(a[OVS_KEY_ATTR_NSH], match,
					    is_mask, false, log) < 0)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_NSH);
	}

	if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
		const struct ovs_key_mpls *mpls_key;

		mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
		SW_FLOW_KEY_PUT(match, mpls.top_lse,
				mpls_key->mpls_lse, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
		SW_FLOW_KEY_PUT(match, tp.flags,
				nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
				is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
				   nd_key->nd_target,
				   sizeof(match->key->ipv6.nd.target),
				   is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
				   nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				   nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	if (attrs != 0) {
		OVS_NLERR(log, "Unknown key attributes %llx",
			  (unsigned long long)attrs);
		return -EINVAL;
	}

	return 0;
}
static void nlattr_set(struct nlattr *attr, u8 val,
		       const struct ovs_len_tbl *tbl)
{
	struct nlattr *nla;
	int rem;

	/* The nlattr stream should already have been validated */
	nla_for_each_nested(nla, attr, rem) {
		if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
			if (tbl[nla_type(nla)].next)
				tbl = tbl[nla_type(nla)].next;
			nlattr_set(nla, val, tbl);
		} else {
			memset(nla_data(nla), val, nla_len(nla));
		}

		if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
			*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
	}
}
static void mask_set_nlattr(struct nlattr *attr, u8 val)
{
	nlattr_set(attr, val, ovs_key_lens);
}
/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as an exact match
 * flow. Otherwise, it is treated as a wildcarded flow, except where the mask
 * does not include any don't-care bits.
 * @net: Used to determine per-namespace field support.
 * @match: receives the extracted flow match information.
 * @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_*
 * Netlink attribute specifying the mask field of the wildcarded flow.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 */
int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
		      const struct nlattr *nla_key,
		      const struct nlattr *nla_mask,
		      bool log)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	struct nlattr *newmask = NULL;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	int err;

	err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
	if (err)
		return err;

	err = parse_vlan_from_nlattrs(match, &key_attrs, a, false, log);
	if (err)
		return err;

	err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
	if (err)
		return err;

	if (match->mask) {
		if (!nla_mask) {
			/* Create an exact match mask. We need to set to 0xff
			 * all the 'match->mask' fields that have been touched
			 * in 'match->key'. We cannot simply memset
			 * 'match->mask', because padding bytes and fields not
			 * specified in 'match->key' should be left to 0.
			 * Instead, we use a stream of netlink attributes,
			 * copied from 'key' and set to 0xff.
			 * ovs_key_from_nlattrs() will take care of filling
			 * 'match->mask' appropriately.
			 */
			newmask = kmemdup(nla_key,
					  nla_total_size(nla_len(nla_key)),
					  GFP_KERNEL);
			if (!newmask)
				return -ENOMEM;

			mask_set_nlattr(newmask, 0xff);

			/* The userspace does not send tunnel attributes that
			 * are 0, but we should not wildcard them nonetheless.
			 */
			if (match->key->tun_proto)
				SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
							 0xff, true);

			nla_mask = newmask;
		}

		err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
		if (err)
			goto free_newmask;

		/* Always match on tci. */
		SW_FLOW_KEY_PUT(match, eth.vlan.tci, htons(0xffff), true);
		SW_FLOW_KEY_PUT(match, eth.cvlan.tci, htons(0xffff), true);

		err = parse_vlan_from_nlattrs(match, &mask_attrs, a, true, log);
		if (err)
			goto free_newmask;

		err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
					   log);
		if (err)
			goto free_newmask;
	}

	if (!match_validate(match, key_attrs, mask_attrs, log))
		err = -EINVAL;

free_newmask:
	kfree(newmask);
	return err;
}
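/* Typical call sequence (sketch, assuming the caller shape in datapath.c):
 *
 *	struct sw_flow_match match;
 *	ovs_match_init(&match, &key, false, &mask);
 *	err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
 *				a[OVS_FLOW_ATTR_MASK], log);
 *
 * with 'a' the parsed OVS_FLOW_ATTR_* table; when OVS_FLOW_ATTR_MASK is
 * absent, the kmemdup() path above synthesizes an exact-match mask.
 */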
static size_t get_ufid_len(const struct nlattr *attr, bool log)
{
	size_t len;

	if (!attr)
		return 0;

	len = nla_len(attr);
	if (len < 1 || len > MAX_UFID_LENGTH) {
		OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
			  nla_len(attr), MAX_UFID_LENGTH);
		return 0;
	}

	return len;
}
/* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
 * or false otherwise.
 */
bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
		      bool log)
{
	sfid->ufid_len = get_ufid_len(attr, log);
	if (sfid->ufid_len)
		memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);

	return sfid->ufid_len;
}
int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
			   const struct sw_flow_key *key, bool log)
{
	struct sw_flow_key *new_key;

	if (ovs_nla_get_ufid(sfid, ufid, log))
		return 0;

	/* If UFID was not provided, use unmasked key. */
	new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
	if (!new_key)
		return -ENOMEM;
	memcpy(new_key, key, sizeof(*key));
	sfid->unmasked_key = new_key;

	return 0;
}

u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
{
	return attr ? nla_get_u32(attr) : 0;
}
/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @net: Network namespace.
 * @key: Receives extracted in_port, priority, tun_key, skb_mark and conntrack
 * metadata.
 * @a: Array of netlink attributes holding parsed %OVS_KEY_ATTR_* Netlink
 * attributes.
 * @attrs: Bit mask for the netlink attributes included in @a.
 * @log: Boolean to allow kernel error logging. Normally true, but when
 * probing for feature compatibility this should be passed in as false to
 * suppress unnecessary error logging.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 *
 * This must be called before the packet key fields are filled in 'key'.
 */
int ovs_nla_get_flow_metadata(struct net *net,
			      const struct nlattr *a[OVS_KEY_ATTR_MAX + 1],
			      u64 attrs, struct sw_flow_key *key, bool log)
{
	struct sw_flow_match match;

	memset(&match, 0, sizeof(match));
	match.key = key;

	key->ct_state = 0;
	key->ct_zone = 0;
	key->ct_orig_proto = 0;
	memset(&key->ct, 0, sizeof(key->ct));
	memset(&key->ipv4.ct_orig, 0, sizeof(key->ipv4.ct_orig));
	memset(&key->ipv6.ct_orig, 0, sizeof(key->ipv6.ct_orig));

	key->phy.in_port = DP_MAX_PORTS;

	return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
}
static int ovs_nla_put_vlan(struct sk_buff *skb, const struct vlan_head *vh,
			    bool is_mask)
{
	__be16 eth_type = !is_mask ? vh->tpid : htons(0xffff);

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
	    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, vh->tci))
		return -EMSGSIZE;
	return 0;
}
*nsh
, bool is_mask
,
1886 struct sk_buff
*skb
)
1888 struct nlattr
*start
;
1890 start
= nla_nest_start(skb
, OVS_KEY_ATTR_NSH
);
1894 if (nla_put(skb
, OVS_NSH_KEY_ATTR_BASE
, sizeof(nsh
->base
), &nsh
->base
))
1895 goto nla_put_failure
;
1897 if (is_mask
|| nsh
->base
.mdtype
== NSH_M_TYPE1
) {
1898 if (nla_put(skb
, OVS_NSH_KEY_ATTR_MD1
,
1899 sizeof(nsh
->context
), nsh
->context
))
1900 goto nla_put_failure
;
1903 /* Don't support MD type 2 yet */
1905 nla_nest_end(skb
, start
);
1913 static int __ovs_nla_put_key(const struct sw_flow_key
*swkey
,
1914 const struct sw_flow_key
*output
, bool is_mask
,
1915 struct sk_buff
*skb
)
1917 struct ovs_key_ethernet
*eth_key
;
1919 struct nlattr
*encap
= NULL
;
1920 struct nlattr
*in_encap
= NULL
;
1922 if (nla_put_u32(skb
, OVS_KEY_ATTR_RECIRC_ID
, output
->recirc_id
))
1923 goto nla_put_failure
;
1925 if (nla_put_u32(skb
, OVS_KEY_ATTR_DP_HASH
, output
->ovs_flow_hash
))
1926 goto nla_put_failure
;
1928 if (nla_put_u32(skb
, OVS_KEY_ATTR_PRIORITY
, output
->phy
.priority
))
1929 goto nla_put_failure
;
1931 if ((swkey
->tun_proto
|| is_mask
)) {
1932 const void *opts
= NULL
;
1934 if (output
->tun_key
.tun_flags
& TUNNEL_OPTIONS_PRESENT
)
1935 opts
= TUN_METADATA_OPTS(output
, swkey
->tun_opts_len
);
1937 if (ip_tun_to_nlattr(skb
, &output
->tun_key
, opts
,
1938 swkey
->tun_opts_len
, swkey
->tun_proto
))
1939 goto nla_put_failure
;
1942 if (swkey
->phy
.in_port
== DP_MAX_PORTS
) {
1943 if (is_mask
&& (output
->phy
.in_port
== 0xffff))
1944 if (nla_put_u32(skb
, OVS_KEY_ATTR_IN_PORT
, 0xffffffff))
1945 goto nla_put_failure
;
1948 upper_u16
= !is_mask
? 0 : 0xffff;
1950 if (nla_put_u32(skb
, OVS_KEY_ATTR_IN_PORT
,
1951 (upper_u16
<< 16) | output
->phy
.in_port
))
1952 goto nla_put_failure
;
1955 if (nla_put_u32(skb
, OVS_KEY_ATTR_SKB_MARK
, output
->phy
.skb_mark
))
1956 goto nla_put_failure
;
1958 if (ovs_ct_put_key(swkey
, output
, skb
))
1959 goto nla_put_failure
;
1961 if (ovs_key_mac_proto(swkey
) == MAC_PROTO_ETHERNET
) {
1962 nla
= nla_reserve(skb
, OVS_KEY_ATTR_ETHERNET
, sizeof(*eth_key
));
1964 goto nla_put_failure
;
1966 eth_key
= nla_data(nla
);
1967 ether_addr_copy(eth_key
->eth_src
, output
->eth
.src
);
1968 ether_addr_copy(eth_key
->eth_dst
, output
->eth
.dst
);
1970 if (swkey
->eth
.vlan
.tci
|| eth_type_vlan(swkey
->eth
.type
)) {
1971 if (ovs_nla_put_vlan(skb
, &output
->eth
.vlan
, is_mask
))
1972 goto nla_put_failure
;
1973 encap
= nla_nest_start(skb
, OVS_KEY_ATTR_ENCAP
);
1974 if (!swkey
->eth
.vlan
.tci
)
1977 if (swkey
->eth
.cvlan
.tci
|| eth_type_vlan(swkey
->eth
.type
)) {
1978 if (ovs_nla_put_vlan(skb
, &output
->eth
.cvlan
, is_mask
))
1979 goto nla_put_failure
;
1980 in_encap
= nla_nest_start(skb
, OVS_KEY_ATTR_ENCAP
);
1981 if (!swkey
->eth
.cvlan
.tci
)
1986 if (swkey
->eth
.type
== htons(ETH_P_802_2
)) {
1988 * Ethertype 802.2 is represented in the netlink with omitted
1989 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
1990 * 0xffff in the mask attribute. Ethertype can also
1993 if (is_mask
&& output
->eth
.type
)
1994 if (nla_put_be16(skb
, OVS_KEY_ATTR_ETHERTYPE
,
1996 goto nla_put_failure
;
2001 if (nla_put_be16(skb
, OVS_KEY_ATTR_ETHERTYPE
, output
->eth
.type
))
2002 goto nla_put_failure
;
2004 if (eth_type_vlan(swkey
->eth
.type
)) {
2005 /* There are 3 VLAN tags, we don't know anything about the rest
2006 * of the packet, so truncate here.
2008 WARN_ON_ONCE(!(encap
&& in_encap
));
2012 if (swkey
->eth
.type
== htons(ETH_P_IP
)) {
2013 struct ovs_key_ipv4
*ipv4_key
;
2015 nla
= nla_reserve(skb
, OVS_KEY_ATTR_IPV4
, sizeof(*ipv4_key
));
2017 goto nla_put_failure
;
2018 ipv4_key
= nla_data(nla
);
2019 ipv4_key
->ipv4_src
= output
->ipv4
.addr
.src
;
2020 ipv4_key
->ipv4_dst
= output
->ipv4
.addr
.dst
;
2021 ipv4_key
->ipv4_proto
= output
->ip
.proto
;
2022 ipv4_key
->ipv4_tos
= output
->ip
.tos
;
2023 ipv4_key
->ipv4_ttl
= output
->ip
.ttl
;
2024 ipv4_key
->ipv4_frag
= output
->ip
.frag
;
2025 } else if (swkey
->eth
.type
== htons(ETH_P_IPV6
)) {
2026 struct ovs_key_ipv6
*ipv6_key
;
2028 nla
= nla_reserve(skb
, OVS_KEY_ATTR_IPV6
, sizeof(*ipv6_key
));
2030 goto nla_put_failure
;
2031 ipv6_key
= nla_data(nla
);
2032 memcpy(ipv6_key
->ipv6_src
, &output
->ipv6
.addr
.src
,
2033 sizeof(ipv6_key
->ipv6_src
));
2034 memcpy(ipv6_key
->ipv6_dst
, &output
->ipv6
.addr
.dst
,
2035 sizeof(ipv6_key
->ipv6_dst
));
2036 ipv6_key
->ipv6_label
= output
->ipv6
.label
;
2037 ipv6_key
->ipv6_proto
= output
->ip
.proto
;
2038 ipv6_key
->ipv6_tclass
= output
->ip
.tos
;
2039 ipv6_key
->ipv6_hlimit
= output
->ip
.ttl
;
2040 ipv6_key
->ipv6_frag
= output
->ip
.frag
;
2041 } else if (swkey
->eth
.type
== htons(ETH_P_NSH
)) {
2042 if (nsh_key_to_nlattr(&output
->nsh
, is_mask
, skb
))
2043 goto nla_put_failure
;
2044 } else if (swkey
->eth
.type
== htons(ETH_P_ARP
) ||
2045 swkey
->eth
.type
== htons(ETH_P_RARP
)) {
2046 struct ovs_key_arp
*arp_key
;
2048 nla
= nla_reserve(skb
, OVS_KEY_ATTR_ARP
, sizeof(*arp_key
));
2050 goto nla_put_failure
;
2051 arp_key
= nla_data(nla
);
2052 memset(arp_key
, 0, sizeof(struct ovs_key_arp
));
2053 arp_key
->arp_sip
= output
->ipv4
.addr
.src
;
2054 arp_key
->arp_tip
= output
->ipv4
.addr
.dst
;
2055 arp_key
->arp_op
= htons(output
->ip
.proto
);
2056 ether_addr_copy(arp_key
->arp_sha
, output
->ipv4
.arp
.sha
);
2057 ether_addr_copy(arp_key
->arp_tha
, output
->ipv4
.arp
.tha
);
2058 } else if (eth_p_mpls(swkey
->eth
.type
)) {
2059 struct ovs_key_mpls
*mpls_key
;
2061 nla
= nla_reserve(skb
, OVS_KEY_ATTR_MPLS
, sizeof(*mpls_key
));
2063 goto nla_put_failure
;
2064 mpls_key
= nla_data(nla
);
2065 mpls_key
->mpls_lse
= output
->mpls
.top_lse
;
2068 if ((swkey
->eth
.type
== htons(ETH_P_IP
) ||
2069 swkey
->eth
.type
== htons(ETH_P_IPV6
)) &&
2070 swkey
->ip
.frag
!= OVS_FRAG_TYPE_LATER
) {
2072 if (swkey
->ip
.proto
== IPPROTO_TCP
) {
2073 struct ovs_key_tcp
*tcp_key
;
2075 nla
= nla_reserve(skb
, OVS_KEY_ATTR_TCP
, sizeof(*tcp_key
));
2077 goto nla_put_failure
;
2078 tcp_key
= nla_data(nla
);
2079 tcp_key
->tcp_src
= output
->tp
.src
;
2080 tcp_key
->tcp_dst
= output
->tp
.dst
;
2081 if (nla_put_be16(skb
, OVS_KEY_ATTR_TCP_FLAGS
,
2083 goto nla_put_failure
;
2084 } else if (swkey
->ip
.proto
== IPPROTO_UDP
) {
2085 struct ovs_key_udp
*udp_key
;
2087 nla
= nla_reserve(skb
, OVS_KEY_ATTR_UDP
, sizeof(*udp_key
));
2089 goto nla_put_failure
;
2090 udp_key
= nla_data(nla
);
2091 udp_key
->udp_src
= output
->tp
.src
;
2092 udp_key
->udp_dst
= output
->tp
.dst
;
		} else if (swkey->ip.proto == IPPROTO_SCTP) {
			struct ovs_key_sctp *sctp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
			if (!nla)
				goto nla_put_failure;
			sctp_key = nla_data(nla);
			sctp_key->sctp_src = output->tp.src;
			sctp_key->sctp_dst = output->tp.dst;
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(output->tp.src);
			icmp_key->icmp_code = ntohs(output->tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(output->tp.src);
			icmpv6_key->icmpv6_code = ntohs(output->tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
				ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
			}
		}
	}

unencap:
	if (in_encap)
		nla_nest_end(skb, in_encap);
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
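
/* Serialize 'output' as flow key attribute 'attr'.  This is a thin
 * wrapper that opens the nest, delegates to __ovs_nla_put_key() and
 * closes it again; 'swkey' only steers which attributes are emitted.
 */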
int ovs_nla_put_key(const struct sw_flow_key *swkey,
		    const struct sw_flow_key *output, int attr, bool is_mask,
		    struct sk_buff *skb)
{
	int err;
	struct nlattr *nla;

	nla = nla_nest_start(skb, attr);
	if (!nla)
		return -EMSGSIZE;
	err = __ovs_nla_put_key(swkey, output, is_mask, skb);
	if (err)
		return err;
	nla_nest_end(skb, nla);

	return 0;
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
{
	if (ovs_identifier_is_ufid(&flow->id))
		return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
			       flow->id.ufid);

	return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->key,
			       OVS_FLOW_ATTR_KEY, false, skb);
}

/* Called with ovs_mutex or RCU read lock. */
int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
{
	return ovs_nla_put_key(&flow->key, &flow->mask->key,
			       OVS_FLOW_ATTR_MASK, true, skb);
}
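
/* Upper bound on the kmalloc'd buffer that holds a copied action list.
 * The buffer may grow on demand (see reserve_sfa_size()) but never
 * beyond this limit.
 */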
#define MAX_ACTIONS_BUFSIZE	(32 * 1024)

static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
	struct sw_flow_actions *sfa;

	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);

	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = 0;
	return sfa;
}

static void ovs_nla_free_set_action(const struct nlattr *a)
{
	const struct nlattr *ovs_key = nla_data(a);
	struct ovs_tunnel_info *ovs_tun;

	switch (nla_type(ovs_key)) {
	case OVS_KEY_ATTR_TUNNEL_INFO:
		ovs_tun = nla_data(ovs_key);
		dst_release((struct dst_entry *)ovs_tun->tun_dst);
		break;
	}
}

void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
{
	const struct nlattr *a;
	int rem;

	if (!sf_acts)
		return;

	nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_SET:
			ovs_nla_free_set_action(a);
			break;
		case OVS_ACTION_ATTR_CT:
			ovs_ct_free_action(a);
			break;
		}
	}

	kfree(sf_acts);
}

static void __ovs_nla_free_flow_actions(struct rcu_head *head)
{
	ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
}
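
/* Reserve room for 'attr_len' more bytes of (aligned) action data in
 * '*sfa', doubling the current ksize() when the buffer is full, and
 * return a pointer to the newly reserved space.  Growth is capped at
 * MAX_ACTIONS_BUFSIZE; past that the request fails with -EMSGSIZE.
 */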
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
				       int attr_len, bool log)
{
	struct sw_flow_actions *acts;
	int new_acts_size;
	int req_size = NLA_ALIGN(attr_len);
	int next_offset = offsetof(struct sw_flow_actions, actions) +
					(*sfa)->actions_len;

	if (req_size <= (ksize(*sfa) - next_offset))
		goto out;

	new_acts_size = ksize(*sfa) * 2;

	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
			OVS_NLERR(log, "Flow action size exceeds max %u",
				  MAX_ACTIONS_BUFSIZE);
			return ERR_PTR(-EMSGSIZE);
		}
		new_acts_size = MAX_ACTIONS_BUFSIZE;
	}

	acts = nla_alloc_flow_actions(new_acts_size);
	if (IS_ERR(acts))
		return (void *)acts;

	memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
	acts->actions_len = (*sfa)->actions_len;
	acts->orig_len = (*sfa)->orig_len;
	kfree(*sfa);
	*sfa = acts;

out:
	(*sfa)->actions_len += req_size;
	return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
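
/* Append one attribute of 'len' bytes to '*sfa'.  A NULL 'data' only
 * reserves the space (plus zeroed netlink padding) so the caller can
 * fill the payload in place, which is how OVS_KEY_ATTR_TUNNEL_INFO
 * and masked set actions are built.
 */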
static struct nlattr *__add_action(struct sw_flow_actions **sfa,
				   int attrtype, void *data, int len, bool log)
{
	struct nlattr *a;

	a = reserve_sfa_size(sfa, nla_attr_size(len), log);
	if (IS_ERR(a))
		return a;

	a->nla_type = attrtype;
	a->nla_len = nla_attr_size(len);

	if (data)
		memcpy(nla_data(a), data, len);
	memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

	return a;
}

int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
		       int len, bool log)
{
	struct nlattr *a;

	a = __add_action(sfa, attrtype, data, len, log);

	return PTR_ERR_OR_ZERO(a);
}
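
/* Nested actions (sample, set-to-masked, tunnel set) are built by
 * bracketing the inner attributes between add_nested_action_start(),
 * which records the offset of a zero-length header attribute, and
 * add_nested_action_end(), which patches that header's nla_len once
 * the payload size is known.
 */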
static inline int add_nested_action_start(struct sw_flow_actions **sfa,
					  int attrtype, bool log)
{
	int used = (*sfa)->actions_len;
	int err;

	err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
	if (err)
		return err;

	return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa,
					 int st_offset)
{
	struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
					      st_offset);

	a->nla_len = sfa->actions_len - st_offset;
}

static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log);
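
/* Validate a sample action's probability and nested action list, then
 * copy it into '*sfa' in the kernel's internal layout: a sample_arg
 * header followed by the (recursively validated) inner actions.
 */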
static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
				    const struct sw_flow_key *key,
				    struct sw_flow_actions **sfa,
				    __be16 eth_type, __be16 vlan_tci,
				    bool log, bool last)
{
	const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
	const struct nlattr *probability, *actions;
	const struct nlattr *a;
	int rem, start, err;
	struct sample_arg arg;

	memset(attrs, 0, sizeof(attrs));
	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
			return -EINVAL;
		attrs[type] = a;
	}
	if (rem)
		return -EINVAL;

	probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
	if (!probability || nla_len(probability) != sizeof(u32))
		return -EINVAL;

	actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
	if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
		return -EINVAL;

	/* validation done, copy sample action. */
	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
	if (start < 0)
		return start;

	/* When both skb and flow may be changed, put the sample
	 * into a deferred fifo. On the other hand, if only skb
	 * may be modified, the actions can be executed in place.
	 *
	 * Do this analysis at the flow installation time.
	 * Set 'clone_action->exec' to true if the actions can be
	 * executed without being deferred.
	 *
	 * If the sample is the last action, it can always be executed
	 * rather than deferred.
	 */
	arg.exec = last || !actions_may_change_flow(actions);
	arg.probability = nla_get_u32(probability);

	err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_ARG, &arg, sizeof(arg),
				 log);
	if (err)
		return err;

	err = __ovs_nla_copy_actions(net, actions, key, sfa,
				     eth_type, vlan_tci, log);

	if (err)
		return err;

	add_nested_action_end(*sfa, start);

	return 0;
}
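
/* Initialize 'match' to point at 'key' and 'mask', optionally zeroing
 * 'key' first; the key/mask ranges tracked by SW_FLOW_KEY_PUT() start
 * out empty.
 */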
void ovs_match_init(struct sw_flow_match *match,
		    struct sw_flow_key *key,
		    bool reset_key,
		    struct sw_flow_mask *mask)
{
	memset(match, 0, sizeof(*match));
	match->key = key;
	match->mask = mask;

	if (reset_key)
		memset(key, 0, sizeof(*key));

	if (mask) {
		memset(&mask->key, 0, sizeof(mask->key));
		mask->range.start = mask->range.end = 0;
	}
}
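
/* Walk the TLV options attached to a Geneve tunnel key, rejecting any
 * truncated option, and latch TUNNEL_CRIT_OPT if a critical option
 * (GENEVE_CRIT_OPT_TYPE bit set) is present.
 */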
static int validate_geneve_opts(struct sw_flow_key *key)
{
	struct geneve_opt *option;
	int opts_len = key->tun_opts_len;
	bool crit_opt = false;

	option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
	while (opts_len > 0) {
		int len;

		if (opts_len < sizeof(*option))
			return -EINVAL;

		len = sizeof(*option) + option->length * 4;
		if (len > opts_len)
			return -EINVAL;

		crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);

		option = (struct geneve_opt *)((u8 *)option + len);
		opts_len -= len;
	}

	key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;

	return 0;
}
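
/* A tunnel set action is not copied verbatim: it is re-encoded as an
 * OVS_KEY_ATTR_TUNNEL_INFO attribute holding a metadata dst, so the
 * datapath can use it directly at execution time.
 */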
static int validate_and_copy_set_tun(const struct nlattr *attr,
				     struct sw_flow_actions **sfa, bool log)
{
	struct sw_flow_match match;
	struct sw_flow_key key;
	struct metadata_dst *tun_dst;
	struct ip_tunnel_info *tun_info;
	struct ovs_tunnel_info *ovs_tun;
	struct nlattr *a;
	int err = 0, start, opts_type;

	ovs_match_init(&match, &key, true, NULL);
	opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
	if (opts_type < 0)
		return opts_type;

	if (key.tun_opts_len) {
		switch (opts_type) {
		case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
			err = validate_geneve_opts(&key);
			if (err < 0)
				return err;
			break;
		case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
			break;
		}
	}

	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
	if (start < 0)
		return start;

	tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
				     GFP_KERNEL);
	if (!tun_dst)
		return -ENOMEM;

	err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
	if (err) {
		dst_release((struct dst_entry *)tun_dst);
		return err;
	}

	a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
			 sizeof(*ovs_tun), log);
	if (IS_ERR(a)) {
		dst_release((struct dst_entry *)tun_dst);
		return PTR_ERR(a);
	}

	ovs_tun = nla_data(a);
	ovs_tun->tun_dst = tun_dst;

	tun_info = &tun_dst->u.tun_info;
	tun_info->mode = IP_TUNNEL_INFO_TX;
	if (key.tun_proto == AF_INET6)
		tun_info->mode |= IP_TUNNEL_INFO_IPV6;
	tun_info->key = key.tun_key;

	/* We need to store the options in the action itself since
	 * everything else will go away after flow setup. We can append
	 * it to tun_info and then point there.
	 */
	ip_tunnel_info_opts_set(tun_info,
				TUN_METADATA_OPTS(&key, key.tun_opts_len),
				key.tun_opts_len);
	add_nested_action_end(*sfa, start);

	return err;
}

static bool validate_nsh(const struct nlattr *attr, bool is_mask,
			 bool is_push_nsh, bool log)
{
	struct sw_flow_match match;
	struct sw_flow_key key;
	int ret = 0;

	ovs_match_init(&match, &key, true, NULL);
	ret = nsh_key_put_from_nlattr(attr, &match, is_mask,
				      is_push_nsh, log);

	return !ret;
}

/* Return false if there are any non-masked bits set.
 * Mask follows data immediately, before any netlink padding.
 */
static bool validate_masked(u8 *data, int len)
{
	u8 *mask = data + len;

	while (len--)
		if (*data++ & ~*mask++)
			return false;

	return true;
}
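
/* Illustrative layout for a masked set payload of a 2-byte field:
 *
 *	nla_data(ovs_key):  d0 d1 m0 m1
 *
 * validate_masked(data, 2) then requires d0 & ~m0 == 0 and
 * d1 & ~m1 == 0, i.e. no value bits set outside the mask.
 */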

static int validate_set(const struct nlattr *a,
			const struct sw_flow_key *flow_key,
			struct sw_flow_actions **sfa, bool *skip_copy,
			u8 mac_proto, __be16 eth_type, bool masked, bool log)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	size_t key_len;

	/* There can be only one key in an action */
	if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
		return -EINVAL;

	key_len = nla_len(ovs_key);
	if (masked)
		key_len /= 2;

	if (key_type > OVS_KEY_ATTR_MAX ||
	    !check_attr_len(key_len, ovs_key_lens[key_type].len))
		return -EINVAL;

	if (masked && !validate_masked(nla_data(ovs_key), key_len))
		return -EINVAL;

	switch (key_type) {
	const struct ovs_key_ipv4 *ipv4_key;
	const struct ovs_key_ipv6 *ipv6_key;
	int err;

	case OVS_KEY_ATTR_PRIORITY:
	case OVS_KEY_ATTR_SKB_MARK:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		break;

	case OVS_KEY_ATTR_ETHERNET:
		if (mac_proto != MAC_PROTO_ETHERNET)
			return -EINVAL;
		break;

	case OVS_KEY_ATTR_TUNNEL:
		if (masked)
			return -EINVAL; /* Masked tunnel set not supported. */

		*skip_copy = true;
		err = validate_and_copy_set_tun(a, sfa, log);
		if (err)
			return err;
		break;

	case OVS_KEY_ATTR_IPV4:
		if (eth_type != htons(ETH_P_IP))
			return -EINVAL;

		ipv4_key = nla_data(ovs_key);

		if (masked) {
			const struct ovs_key_ipv4 *mask = ipv4_key + 1;

			/* Non-writeable fields. */
			if (mask->ipv4_proto || mask->ipv4_frag)
				return -EINVAL;
		} else {
			if (ipv4_key->ipv4_proto != flow_key->ip.proto)
				return -EINVAL;

			if (ipv4_key->ipv4_frag != flow_key->ip.frag)
				return -EINVAL;
		}
		break;

	case OVS_KEY_ATTR_IPV6:
		if (eth_type != htons(ETH_P_IPV6))
			return -EINVAL;

		ipv6_key = nla_data(ovs_key);

		if (masked) {
			const struct ovs_key_ipv6 *mask = ipv6_key + 1;

			/* Non-writeable fields. */
			if (mask->ipv6_proto || mask->ipv6_frag)
				return -EINVAL;

			/* Invalid bits in the flow label mask? */
			if (ntohl(mask->ipv6_label) & 0xFFF00000)
				return -EINVAL;
		} else {
			if (ipv6_key->ipv6_proto != flow_key->ip.proto)
				return -EINVAL;

			if (ipv6_key->ipv6_frag != flow_key->ip.frag)
				return -EINVAL;
		}
		if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_TCP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_TCP)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_UDP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_UDP)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_MPLS:
		if (!eth_p_mpls(eth_type))
			return -EINVAL;
		break;

	case OVS_KEY_ATTR_SCTP:
		if ((eth_type != htons(ETH_P_IP) &&
		     eth_type != htons(ETH_P_IPV6)) ||
		    flow_key->ip.proto != IPPROTO_SCTP)
			return -EINVAL;

		break;

	case OVS_KEY_ATTR_NSH:
		if (eth_type != htons(ETH_P_NSH))
			return -EINVAL;
		if (!validate_nsh(nla_data(a), masked, false, log))
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	/* Convert non-masked non-tunnel set actions to masked set actions. */
	if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
		int start, len = key_len * 2;
		struct nlattr *at;

		*skip_copy = true;

		start = add_nested_action_start(sfa,
						OVS_ACTION_ATTR_SET_TO_MASKED,
						log);
		if (start < 0)
			return start;

		at = __add_action(sfa, key_type, NULL, len, log);
		if (IS_ERR(at))
			return PTR_ERR(at);

		memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
		memset(nla_data(at) + key_len, 0xff, key_len);    /* Mask. */
		/* Clear non-writeable bits from otherwise writeable fields. */
		if (key_type == OVS_KEY_ATTR_IPV6) {
			struct ovs_key_ipv6 *mask = nla_data(at) + key_len;

			mask->ipv6_label &= htonl(0x000FFFFF);
		}
		add_nested_action_end(*sfa, start);
	}

	return 0;
}
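
/* An OVS_ACTION_ATTR_USERSPACE action must carry a non-zero Netlink
 * PID so the packet can actually be delivered to a userspace socket.
 */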
static int validate_userspace(const struct nlattr *attr)
{
	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
		[OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
	};
	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
	int error;

	error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr,
				 userspace_policy, NULL);
	if (error)
		return error;

	if (!a[OVS_USERSPACE_ATTR_PID] ||
	    !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
		return -EINVAL;

	return 0;
}

static int copy_action(const struct nlattr *from,
		       struct sw_flow_actions **sfa, bool log)
{
	int totlen = NLA_ALIGN(from->nla_len);
	struct nlattr *to;

	to = reserve_sfa_size(sfa, from->nla_len, log);
	if (IS_ERR(to))
		return PTR_ERR(to);

	memcpy(to, from, totlen);
	return 0;
}
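
/* Validate and copy a whole action list.  'eth_type', 'vlan_tci' and
 * the derived mac_proto are threaded through the loop so that actions
 * which rewrite headers (push/pop VLAN, MPLS, Ethernet, NSH) constrain
 * what later actions in the same list may legally do.
 */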
static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
				  const struct sw_flow_key *key,
				  struct sw_flow_actions **sfa,
				  __be16 eth_type, __be16 vlan_tci, bool log)
{
	u8 mac_proto = ovs_key_mac_proto(key);
	const struct nlattr *a;
	int rem, err;

	nla_for_each_nested(a, attr, rem) {
		/* Expected argument lengths, (u32)-1 for variable length. */
		static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
			[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
			[OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
			[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
			[OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
			[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
			[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
			[OVS_ACTION_ATTR_POP_VLAN] = 0,
			[OVS_ACTION_ATTR_SET] = (u32)-1,
			[OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
			[OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
			[OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
			[OVS_ACTION_ATTR_CT] = (u32)-1,
			[OVS_ACTION_ATTR_CT_CLEAR] = 0,
			[OVS_ACTION_ATTR_TRUNC] = sizeof(struct ovs_action_trunc),
			[OVS_ACTION_ATTR_PUSH_ETH] = sizeof(struct ovs_action_push_eth),
			[OVS_ACTION_ATTR_POP_ETH] = 0,
			[OVS_ACTION_ATTR_PUSH_NSH] = (u32)-1,
			[OVS_ACTION_ATTR_POP_NSH] = 0,
			[OVS_ACTION_ATTR_METER] = sizeof(u32),
		};
		const struct ovs_action_push_vlan *vlan;
		int type = nla_type(a);
		bool skip_copy;

		if (type > OVS_ACTION_ATTR_MAX ||
		    (action_lens[type] != nla_len(a) &&
		     action_lens[type] != (u32)-1))
			return -EINVAL;

		skip_copy = false;
		switch (type) {
		case OVS_ACTION_ATTR_UNSPEC:
			return -EINVAL;

		case OVS_ACTION_ATTR_USERSPACE:
			err = validate_userspace(a);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_TRUNC: {
			const struct ovs_action_trunc *trunc = nla_data(a);

			if (trunc->max_len < ETH_HLEN)
				return -EINVAL;
			break;
		}

		case OVS_ACTION_ATTR_HASH: {
			const struct ovs_action_hash *act_hash = nla_data(a);

			switch (act_hash->hash_alg) {
			case OVS_HASH_ALG_L4:
				break;
			default:
				return -EINVAL;
			}

			break;
		}

		case OVS_ACTION_ATTR_POP_VLAN:
			if (mac_proto != MAC_PROTO_ETHERNET)
				return -EINVAL;
			vlan_tci = htons(0);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			if (mac_proto != MAC_PROTO_ETHERNET)
				return -EINVAL;
			vlan = nla_data(a);
			if (!eth_type_vlan(vlan->vlan_tpid))
				return -EINVAL;
			if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
				return -EINVAL;
			vlan_tci = vlan->vlan_tci;
			break;

		case OVS_ACTION_ATTR_RECIRC:
			break;

		case OVS_ACTION_ATTR_PUSH_MPLS: {
			const struct ovs_action_push_mpls *mpls = nla_data(a);

			if (!eth_p_mpls(mpls->mpls_ethertype))
				return -EINVAL;
			/* Prohibit push MPLS other than to a white list
			 * for packets that have a known tag order.
			 */
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    (eth_type != htons(ETH_P_IP) &&
			     eth_type != htons(ETH_P_IPV6) &&
			     eth_type != htons(ETH_P_ARP) &&
			     eth_type != htons(ETH_P_RARP) &&
			     !eth_p_mpls(eth_type)))
				return -EINVAL;
			eth_type = mpls->mpls_ethertype;
			break;
		}

		case OVS_ACTION_ATTR_POP_MPLS:
			if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
			    !eth_p_mpls(eth_type))
				return -EINVAL;

			/* Disallow subsequent L2.5+ set and mpls_pop actions
			 * as there is no check here to ensure that the new
			 * eth_type is valid and thus set actions could
			 * write off the end of the packet or otherwise
			 * corrupt it.
			 *
			 * Support for these actions is planned using packet
			 * recirculation.
			 */
			eth_type = htons(0);
			break;

		case OVS_ACTION_ATTR_SET:
			err = validate_set(a, key, sfa,
					   &skip_copy, mac_proto, eth_type,
					   false, log);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SET_MASKED:
			err = validate_set(a, key, sfa,
					   &skip_copy, mac_proto, eth_type,
					   true, log);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE: {
			bool last = nla_is_last(a, rem);

			err = validate_and_copy_sample(net, a, key, sfa,
						       eth_type, vlan_tci,
						       log, last);
			if (err)
				return err;
			skip_copy = true;
			break;
		}

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_copy_action(net, a, key, sfa, log);
			if (err)
				return err;
			skip_copy = true;
			break;

		case OVS_ACTION_ATTR_CT_CLEAR:
			break;

		case OVS_ACTION_ATTR_PUSH_ETH:
			/* Disallow pushing an Ethernet header if one
			 * is already present */
			if (mac_proto != MAC_PROTO_NONE)
				return -EINVAL;
			mac_proto = MAC_PROTO_ETHERNET;
			break;

		case OVS_ACTION_ATTR_POP_ETH:
			if (mac_proto != MAC_PROTO_ETHERNET)
				return -EINVAL;
			if (vlan_tci & htons(VLAN_TAG_PRESENT))
				return -EINVAL;
			mac_proto = MAC_PROTO_NONE;
			break;

		case OVS_ACTION_ATTR_PUSH_NSH:
			if (mac_proto != MAC_PROTO_ETHERNET) {
				u8 next_proto;

				next_proto = tun_p_from_eth_p(eth_type);
				if (!next_proto)
					return -EINVAL;
			}
			mac_proto = MAC_PROTO_NONE;
			if (!validate_nsh(nla_data(a), false, true, true))
				return -EINVAL;
			break;

		case OVS_ACTION_ATTR_POP_NSH: {
			__be16 inner_proto;

			if (eth_type != htons(ETH_P_NSH))
				return -EINVAL;
			inner_proto = tun_p_to_eth_p(key->nsh.base.np);
			if (!inner_proto)
				return -EINVAL;
			eth_type = inner_proto;
			if (key->nsh.base.np == TUN_P_ETHERNET)
				mac_proto = MAC_PROTO_ETHERNET;
			else
				mac_proto = MAC_PROTO_NONE;
			break;
		}

		case OVS_ACTION_ATTR_METER:
			/* Non-existent meters are simply ignored. */
			break;

		default:
			OVS_NLERR(log, "Unknown Action type %d", type);
			return -EINVAL;
		}
		if (!skip_copy) {
			err = copy_action(a, sfa, log);
			if (err)
				return err;
		}
	}

	if (rem > 0)
		return -EINVAL;

	return 0;
}

/* 'key' must be the masked key. */
int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
			 const struct sw_flow_key *key,
			 struct sw_flow_actions **sfa, bool log)
{
	int err;

	*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
	if (IS_ERR(*sfa))
		return PTR_ERR(*sfa);

	(*sfa)->orig_len = nla_len(attr);
	err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type,
				     key->eth.vlan.tci, log);
	if (err)
		ovs_nla_free_flow_actions(*sfa);

	return err;
}
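
/* Translate the kernel-internal sample layout (sample_arg followed by
 * the inner actions) back into the OVS_SAMPLE_ATTR_* netlink format
 * expected by userspace.
 */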
static int sample_action_to_attr(const struct nlattr *attr,
				 struct sk_buff *skb)
{
	struct nlattr *start, *ac_start = NULL, *sample_arg;
	int err = 0, rem = nla_len(attr);
	const struct sample_arg *arg;
	struct nlattr *actions;

	start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
	if (!start)
		return -EMSGSIZE;

	sample_arg = nla_data(attr);
	arg = nla_data(sample_arg);
	actions = nla_next(sample_arg, &rem);

	if (nla_put_u32(skb, OVS_SAMPLE_ATTR_PROBABILITY, arg->probability)) {
		err = -EMSGSIZE;
		goto out;
	}

	ac_start = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
	if (!ac_start) {
		err = -EMSGSIZE;
		goto out;
	}

	err = ovs_nla_put_actions(actions, rem, skb);

out:
	if (err) {
		nla_nest_cancel(skb, ac_start);
		nla_nest_cancel(skb, start);
	} else {
		nla_nest_end(skb, ac_start);
		nla_nest_end(skb, start);
	}

	return err;
}
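
/* Tunnel set actions were converted to OVS_KEY_ATTR_TUNNEL_INFO on
 * the way in, so they must be re-encoded as tunnel key attributes
 * here; every other set action can be copied back verbatim.
 */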
static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	int key_type = nla_type(ovs_key);
	struct nlattr *start;
	int err;

	switch (key_type) {
	case OVS_KEY_ATTR_TUNNEL_INFO: {
		struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
		struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;

		start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
		if (!start)
			return -EMSGSIZE;

		err = ip_tun_to_nlattr(skb, &tun_info->key,
				       ip_tunnel_info_opts(tun_info),
				       tun_info->options_len,
				       ip_tunnel_info_af(tun_info));
		if (err)
			return err;
		nla_nest_end(skb, start);
		break;
	}
	default:
		if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
			return -EMSGSIZE;
		break;
	}

	return 0;
}

static int masked_set_action_to_set_action_attr(const struct nlattr *a,
						struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	struct nlattr *nla;
	size_t key_len = nla_len(ovs_key) / 2;

	/* Revert the conversion we did from a non-masked set action to
	 * masked set action.
	 */
	nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
	if (!nla)
		return -EMSGSIZE;

	if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
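
/* Dump an action list to 'skb', undoing the internal conversions
 * (masked sets, tunnel info, sample layout) performed when the
 * actions were first copied from userspace.
 */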
int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
{
	const struct nlattr *a;
	int rem, err;

	nla_for_each_attr(a, attr, len, rem) {
		int type = nla_type(a);

		switch (type) {
		case OVS_ACTION_ATTR_SET:
			err = set_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = masked_set_action_to_set_action_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample_action_to_attr(a, skb);
			if (err)
				return err;
			break;

		case OVS_ACTION_ATTR_CT:
			err = ovs_ct_action_to_attr(nla_data(a), skb);
			if (err)
				return err;
			break;

		default:
			if (nla_put(skb, type, nla_len(a), nla_data(a)))
				return -EMSGSIZE;
			break;
		}
	}

	return 0;
}