net/openvswitch/flow_netlink.c
blob 689c1726422162abfbaa47c2b103c94971d5190e
1 /*
2 * Copyright (c) 2007-2014 Nicira, Inc.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21 #include "flow.h"
22 #include "datapath.h"
23 #include <linux/uaccess.h>
24 #include <linux/netdevice.h>
25 #include <linux/etherdevice.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <net/llc_pdu.h>
29 #include <linux/kernel.h>
30 #include <linux/jhash.h>
31 #include <linux/jiffies.h>
32 #include <linux/llc.h>
33 #include <linux/module.h>
34 #include <linux/in.h>
35 #include <linux/rcupdate.h>
36 #include <linux/if_arp.h>
37 #include <linux/ip.h>
38 #include <linux/ipv6.h>
39 #include <linux/sctp.h>
40 #include <linux/tcp.h>
41 #include <linux/udp.h>
42 #include <linux/icmp.h>
43 #include <linux/icmpv6.h>
44 #include <linux/rculist.h>
45 #include <net/geneve.h>
46 #include <net/ip.h>
47 #include <net/ipv6.h>
48 #include <net/ndisc.h>
49 #include <net/mpls.h>
50 #include <net/vxlan.h>
52 #include "flow_netlink.h"
54 struct ovs_len_tbl {
55 int len;
56 const struct ovs_len_tbl *next;
59 #define OVS_ATTR_NESTED -1
60 #define OVS_ATTR_VARIABLE -2
62 static void update_range(struct sw_flow_match *match,
63 size_t offset, size_t size, bool is_mask)
65 struct sw_flow_key_range *range;
66 size_t start = rounddown(offset, sizeof(long));
67 size_t end = roundup(offset + size, sizeof(long));
69 if (!is_mask)
70 range = &match->range;
71 else
72 range = &match->mask->range;
74 if (range->start == range->end) {
75 range->start = start;
76 range->end = end;
77 return;
80 if (range->start > start)
81 range->start = start;
83 if (range->end < end)
84 range->end = end;
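/* update_range() above widens the byte range of the flow key that this
 * match actually uses, rounded out to sizeof(long) boundaries; later
 * masked comparisons and hashing only walk this populated part of
 * struct sw_flow_key.  is_mask selects whether the key's or the mask's
 * range is grown.
 */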
87 #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
88 do { \
89 update_range(match, offsetof(struct sw_flow_key, field), \
90 sizeof((match)->key->field), is_mask); \
91 if (is_mask) \
92 (match)->mask->key.field = value; \
93 else \
94 (match)->key->field = value; \
95 } while (0)
97 #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
98 do { \
99 update_range(match, offset, len, is_mask); \
100 if (is_mask) \
101 memcpy((u8 *)&(match)->mask->key + offset, value_p, \
102 len); \
103 else \
104 memcpy((u8 *)(match)->key + offset, value_p, len); \
105 } while (0)
107 #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
108 SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
109 value_p, len, is_mask)
111 #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \
112 do { \
113 update_range(match, offsetof(struct sw_flow_key, field), \
114 sizeof((match)->key->field), is_mask); \
115 if (is_mask) \
116 memset((u8 *)&(match)->mask->key.field, value, \
117 sizeof((match)->mask->key.field)); \
118 else \
119 memset((u8 *)&(match)->key->field, value, \
120 sizeof((match)->key->field)); \
121 } while (0)
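/* The SW_FLOW_KEY_* helpers above write a value into either match->key
 * or match->mask->key depending on is_mask, and call update_range() so
 * the touched bytes are included in the match range.  A minimal usage
 * sketch (similar calls appear later in this file):
 *
 *	SW_FLOW_KEY_PUT(match, ip.proto, ipv4_key->ipv4_proto, is_mask);
 *	SW_FLOW_KEY_MEMCPY(match, eth.src, eth_key->eth_src, ETH_ALEN, is_mask);
 */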
123 static bool match_validate(const struct sw_flow_match *match,
124 u64 key_attrs, u64 mask_attrs, bool log)
126 u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
127 u64 mask_allowed = key_attrs; /* At most allow all key attributes */
 129         /* The following mask attributes are allowed only if they
130 * pass the validation tests. */
131 mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
132 | (1 << OVS_KEY_ATTR_IPV6)
133 | (1 << OVS_KEY_ATTR_TCP)
134 | (1 << OVS_KEY_ATTR_TCP_FLAGS)
135 | (1 << OVS_KEY_ATTR_UDP)
136 | (1 << OVS_KEY_ATTR_SCTP)
137 | (1 << OVS_KEY_ATTR_ICMP)
138 | (1 << OVS_KEY_ATTR_ICMPV6)
139 | (1 << OVS_KEY_ATTR_ARP)
140 | (1 << OVS_KEY_ATTR_ND)
141 | (1 << OVS_KEY_ATTR_MPLS));
143 /* Always allowed mask fields. */
144 mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
145 | (1 << OVS_KEY_ATTR_IN_PORT)
146 | (1 << OVS_KEY_ATTR_ETHERTYPE));
148 /* Check key attributes. */
149 if (match->key->eth.type == htons(ETH_P_ARP)
150 || match->key->eth.type == htons(ETH_P_RARP)) {
151 key_expected |= 1 << OVS_KEY_ATTR_ARP;
152 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
153 mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
156 if (eth_p_mpls(match->key->eth.type)) {
157 key_expected |= 1 << OVS_KEY_ATTR_MPLS;
158 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
159 mask_allowed |= 1 << OVS_KEY_ATTR_MPLS;
162 if (match->key->eth.type == htons(ETH_P_IP)) {
163 key_expected |= 1 << OVS_KEY_ATTR_IPV4;
164 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
165 mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
167 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
168 if (match->key->ip.proto == IPPROTO_UDP) {
169 key_expected |= 1 << OVS_KEY_ATTR_UDP;
170 if (match->mask && (match->mask->key.ip.proto == 0xff))
171 mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
174 if (match->key->ip.proto == IPPROTO_SCTP) {
175 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
176 if (match->mask && (match->mask->key.ip.proto == 0xff))
177 mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
180 if (match->key->ip.proto == IPPROTO_TCP) {
181 key_expected |= 1 << OVS_KEY_ATTR_TCP;
182 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
183 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
184 mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
185 mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
189 if (match->key->ip.proto == IPPROTO_ICMP) {
190 key_expected |= 1 << OVS_KEY_ATTR_ICMP;
191 if (match->mask && (match->mask->key.ip.proto == 0xff))
192 mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
197 if (match->key->eth.type == htons(ETH_P_IPV6)) {
198 key_expected |= 1 << OVS_KEY_ATTR_IPV6;
199 if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
200 mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
202 if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
203 if (match->key->ip.proto == IPPROTO_UDP) {
204 key_expected |= 1 << OVS_KEY_ATTR_UDP;
205 if (match->mask && (match->mask->key.ip.proto == 0xff))
206 mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
209 if (match->key->ip.proto == IPPROTO_SCTP) {
210 key_expected |= 1 << OVS_KEY_ATTR_SCTP;
211 if (match->mask && (match->mask->key.ip.proto == 0xff))
212 mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
215 if (match->key->ip.proto == IPPROTO_TCP) {
216 key_expected |= 1 << OVS_KEY_ATTR_TCP;
217 key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
218 if (match->mask && (match->mask->key.ip.proto == 0xff)) {
219 mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
220 mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
224 if (match->key->ip.proto == IPPROTO_ICMPV6) {
225 key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
226 if (match->mask && (match->mask->key.ip.proto == 0xff))
227 mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
229 if (match->key->tp.src ==
230 htons(NDISC_NEIGHBOUR_SOLICITATION) ||
231 match->key->tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
232 key_expected |= 1 << OVS_KEY_ATTR_ND;
233 if (match->mask && (match->mask->key.tp.src == htons(0xff)))
234 mask_allowed |= 1 << OVS_KEY_ATTR_ND;
240 if ((key_attrs & key_expected) != key_expected) {
241 /* Key attributes check failed. */
242 OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)",
243 (unsigned long long)key_attrs,
244 (unsigned long long)key_expected);
245 return false;
248 if ((mask_attrs & mask_allowed) != mask_attrs) {
249 /* Mask attributes check failed. */
250 OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)",
251 (unsigned long long)mask_attrs,
252 (unsigned long long)mask_allowed);
253 return false;
256 return true;
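/* match_validate() above enforces two rules: every attribute bit in
 * key_expected must be present in key_attrs (e.g. an IPv4 key must
 * carry OVS_KEY_ATTR_IPV4), and mask_attrs may only contain bits from
 * mask_allowed, i.e. L3/L4 masks are only accepted when the
 * lower-layer fields (EtherType, IP protocol) are matched exactly.
 */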
259 size_t ovs_tun_key_attr_size(void)
261 /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider
262 * updating this function.
264 return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */
265 + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
266 + nla_total_size(16) /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
267 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */
268 + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
269 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
270 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
271 + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
272 + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
273 /* OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS is mutually exclusive with
274 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS and covered by it.
276 + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */
277 + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */
280 size_t ovs_key_attr_size(void)
282 /* Whenever adding new OVS_KEY_ FIELDS, we should consider
283 * updating this function.
285 BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 26);
287 return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */
288 + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */
289 + ovs_tun_key_attr_size()
290 + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
291 + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
292 + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */
293 + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */
294 + nla_total_size(4) /* OVS_KEY_ATTR_CT_STATE */
295 + nla_total_size(2) /* OVS_KEY_ATTR_CT_ZONE */
296 + nla_total_size(4) /* OVS_KEY_ATTR_CT_MARK */
297 + nla_total_size(16) /* OVS_KEY_ATTR_CT_LABELS */
298 + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
299 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
300 + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */
301 + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */
302 + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */
303 + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */
304 + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */
305 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
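/* The two *_attr_size() helpers above give a worst-case upper bound on
 * the netlink space a serialized flow key can take; the datapath uses
 * them when sizing flow reply and upcall skbs rather than computing an
 * exact per-flow length.
 */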
308 static const struct ovs_len_tbl ovs_vxlan_ext_key_lens[OVS_VXLAN_EXT_MAX + 1] = {
309 [OVS_VXLAN_EXT_GBP] = { .len = sizeof(u32) },
312 static const struct ovs_len_tbl ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
313 [OVS_TUNNEL_KEY_ATTR_ID] = { .len = sizeof(u64) },
314 [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = { .len = sizeof(u32) },
315 [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = { .len = sizeof(u32) },
316 [OVS_TUNNEL_KEY_ATTR_TOS] = { .len = 1 },
317 [OVS_TUNNEL_KEY_ATTR_TTL] = { .len = 1 },
318 [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = { .len = 0 },
319 [OVS_TUNNEL_KEY_ATTR_CSUM] = { .len = 0 },
320 [OVS_TUNNEL_KEY_ATTR_TP_SRC] = { .len = sizeof(u16) },
321 [OVS_TUNNEL_KEY_ATTR_TP_DST] = { .len = sizeof(u16) },
322 [OVS_TUNNEL_KEY_ATTR_OAM] = { .len = 0 },
323 [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = { .len = OVS_ATTR_VARIABLE },
324 [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS] = { .len = OVS_ATTR_NESTED,
325 .next = ovs_vxlan_ext_key_lens },
326 [OVS_TUNNEL_KEY_ATTR_IPV6_SRC] = { .len = sizeof(struct in6_addr) },
327 [OVS_TUNNEL_KEY_ATTR_IPV6_DST] = { .len = sizeof(struct in6_addr) },
330 /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
331 static const struct ovs_len_tbl ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
332 [OVS_KEY_ATTR_ENCAP] = { .len = OVS_ATTR_NESTED },
333 [OVS_KEY_ATTR_PRIORITY] = { .len = sizeof(u32) },
334 [OVS_KEY_ATTR_IN_PORT] = { .len = sizeof(u32) },
335 [OVS_KEY_ATTR_SKB_MARK] = { .len = sizeof(u32) },
336 [OVS_KEY_ATTR_ETHERNET] = { .len = sizeof(struct ovs_key_ethernet) },
337 [OVS_KEY_ATTR_VLAN] = { .len = sizeof(__be16) },
338 [OVS_KEY_ATTR_ETHERTYPE] = { .len = sizeof(__be16) },
339 [OVS_KEY_ATTR_IPV4] = { .len = sizeof(struct ovs_key_ipv4) },
340 [OVS_KEY_ATTR_IPV6] = { .len = sizeof(struct ovs_key_ipv6) },
341 [OVS_KEY_ATTR_TCP] = { .len = sizeof(struct ovs_key_tcp) },
342 [OVS_KEY_ATTR_TCP_FLAGS] = { .len = sizeof(__be16) },
343 [OVS_KEY_ATTR_UDP] = { .len = sizeof(struct ovs_key_udp) },
344 [OVS_KEY_ATTR_SCTP] = { .len = sizeof(struct ovs_key_sctp) },
345 [OVS_KEY_ATTR_ICMP] = { .len = sizeof(struct ovs_key_icmp) },
346 [OVS_KEY_ATTR_ICMPV6] = { .len = sizeof(struct ovs_key_icmpv6) },
347 [OVS_KEY_ATTR_ARP] = { .len = sizeof(struct ovs_key_arp) },
348 [OVS_KEY_ATTR_ND] = { .len = sizeof(struct ovs_key_nd) },
349 [OVS_KEY_ATTR_RECIRC_ID] = { .len = sizeof(u32) },
350 [OVS_KEY_ATTR_DP_HASH] = { .len = sizeof(u32) },
351 [OVS_KEY_ATTR_TUNNEL] = { .len = OVS_ATTR_NESTED,
352 .next = ovs_tunnel_key_lens, },
353 [OVS_KEY_ATTR_MPLS] = { .len = sizeof(struct ovs_key_mpls) },
354 [OVS_KEY_ATTR_CT_STATE] = { .len = sizeof(u32) },
355 [OVS_KEY_ATTR_CT_ZONE] = { .len = sizeof(u16) },
356 [OVS_KEY_ATTR_CT_MARK] = { .len = sizeof(u32) },
357 [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
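/* ovs_key_lens[] above drives generic attribute validation: a fixed
 * .len means the attribute must have exactly that payload size,
 * OVS_ATTR_NESTED means the payload is itself a nested attribute list
 * (validated against the table in .next, if any), and OVS_ATTR_VARIABLE
 * means any length is accepted and the consumer of the attribute
 * performs its own checks.
 */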
360 static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
362 return expected_len == attr_len ||
363 expected_len == OVS_ATTR_NESTED ||
364 expected_len == OVS_ATTR_VARIABLE;
367 static bool is_all_zero(const u8 *fp, size_t size)
369 int i;
371 if (!fp)
372 return false;
374 for (i = 0; i < size; i++)
375 if (fp[i])
376 return false;
378 return true;
381 static int __parse_flow_nlattrs(const struct nlattr *attr,
382 const struct nlattr *a[],
383 u64 *attrsp, bool log, bool nz)
385 const struct nlattr *nla;
386 u64 attrs;
387 int rem;
389 attrs = *attrsp;
390 nla_for_each_nested(nla, attr, rem) {
391 u16 type = nla_type(nla);
392 int expected_len;
394 if (type > OVS_KEY_ATTR_MAX) {
395 OVS_NLERR(log, "Key type %d is out of range max %d",
396 type, OVS_KEY_ATTR_MAX);
397 return -EINVAL;
400 if (attrs & (1 << type)) {
401 OVS_NLERR(log, "Duplicate key (type %d).", type);
402 return -EINVAL;
405 expected_len = ovs_key_lens[type].len;
406 if (!check_attr_len(nla_len(nla), expected_len)) {
407 OVS_NLERR(log, "Key %d has unexpected len %d expected %d",
408 type, nla_len(nla), expected_len);
409 return -EINVAL;
412 if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
413 attrs |= 1 << type;
414 a[type] = nla;
417 if (rem) {
418 OVS_NLERR(log, "Message has %d unknown bytes.", rem);
419 return -EINVAL;
422 *attrsp = attrs;
423 return 0;
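/* __parse_flow_nlattrs() above walks one nested OVS_KEY_ATTR_* list,
 * rejecting unknown types, duplicates and wrong lengths, and records
 * each attribute in a[] plus a bit in *attrsp.  With nz == true (the
 * mask variant below) attributes whose payload is all zeroes are
 * skipped, so absent mask fields stay fully wildcarded.
 */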
426 static int parse_flow_mask_nlattrs(const struct nlattr *attr,
427 const struct nlattr *a[], u64 *attrsp,
428 bool log)
430 return __parse_flow_nlattrs(attr, a, attrsp, log, true);
433 static int parse_flow_nlattrs(const struct nlattr *attr,
434 const struct nlattr *a[], u64 *attrsp,
435 bool log)
437 return __parse_flow_nlattrs(attr, a, attrsp, log, false);
440 static int genev_tun_opt_from_nlattr(const struct nlattr *a,
441 struct sw_flow_match *match, bool is_mask,
442 bool log)
444 unsigned long opt_key_offset;
446 if (nla_len(a) > sizeof(match->key->tun_opts)) {
447 OVS_NLERR(log, "Geneve option length err (len %d, max %zu).",
448 nla_len(a), sizeof(match->key->tun_opts));
449 return -EINVAL;
452 if (nla_len(a) % 4 != 0) {
453 OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.",
454 nla_len(a));
455 return -EINVAL;
458 /* We need to record the length of the options passed
459 * down, otherwise packets with the same format but
460 * additional options will be silently matched.
462 if (!is_mask) {
463 SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
464 false);
465 } else {
466 /* This is somewhat unusual because it looks at
467 * both the key and mask while parsing the
468 * attributes (and by extension assumes the key
469 * is parsed first). Normally, we would verify
470 * that each is the correct length and that the
471 * attributes line up in the validate function.
472 * However, that is difficult because this is
473 * variable length and we won't have the
474 * information later.
476 if (match->key->tun_opts_len != nla_len(a)) {
477 OVS_NLERR(log, "Geneve option len %d != mask len %d",
478 match->key->tun_opts_len, nla_len(a));
479 return -EINVAL;
482 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
485 opt_key_offset = TUN_METADATA_OFFSET(nla_len(a));
486 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a),
487 nla_len(a), is_mask);
488 return 0;
491 static int vxlan_tun_opt_from_nlattr(const struct nlattr *attr,
492 struct sw_flow_match *match, bool is_mask,
493 bool log)
495 struct nlattr *a;
496 int rem;
497 unsigned long opt_key_offset;
498 struct vxlan_metadata opts;
500 BUILD_BUG_ON(sizeof(opts) > sizeof(match->key->tun_opts));
502 memset(&opts, 0, sizeof(opts));
503 nla_for_each_nested(a, attr, rem) {
504 int type = nla_type(a);
506 if (type > OVS_VXLAN_EXT_MAX) {
507 OVS_NLERR(log, "VXLAN extension %d out of range max %d",
508 type, OVS_VXLAN_EXT_MAX);
509 return -EINVAL;
512 if (!check_attr_len(nla_len(a),
513 ovs_vxlan_ext_key_lens[type].len)) {
514 OVS_NLERR(log, "VXLAN extension %d has unexpected len %d expected %d",
515 type, nla_len(a),
516 ovs_vxlan_ext_key_lens[type].len);
517 return -EINVAL;
520 switch (type) {
521 case OVS_VXLAN_EXT_GBP:
522 opts.gbp = nla_get_u32(a);
523 break;
524 default:
525 OVS_NLERR(log, "Unknown VXLAN extension attribute %d",
526 type);
527 return -EINVAL;
530 if (rem) {
531 OVS_NLERR(log, "VXLAN extension message has %d unknown bytes.",
532 rem);
533 return -EINVAL;
536 if (!is_mask)
537 SW_FLOW_KEY_PUT(match, tun_opts_len, sizeof(opts), false);
538 else
539 SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true);
541 opt_key_offset = TUN_METADATA_OFFSET(sizeof(opts));
542 SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, &opts, sizeof(opts),
543 is_mask);
544 return 0;
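/* vxlan_tun_opt_from_nlattr() above currently understands a single
 * VXLAN extension, OVS_VXLAN_EXT_GBP (the Group Based Policy tag), and
 * stores it as a struct vxlan_metadata blob in the tunnel option area
 * of the flow key, just as the Geneve path does for its options.
 */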
547 static int ip_tun_from_nlattr(const struct nlattr *attr,
548 struct sw_flow_match *match, bool is_mask,
549 bool log)
551 bool ttl = false, ipv4 = false, ipv6 = false;
552 __be16 tun_flags = 0;
553 int opts_type = 0;
554 struct nlattr *a;
555 int rem;
557 nla_for_each_nested(a, attr, rem) {
558 int type = nla_type(a);
559 int err;
561 if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
562 OVS_NLERR(log, "Tunnel attr %d out of range max %d",
563 type, OVS_TUNNEL_KEY_ATTR_MAX);
564 return -EINVAL;
567 if (!check_attr_len(nla_len(a),
568 ovs_tunnel_key_lens[type].len)) {
569 OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d",
570 type, nla_len(a), ovs_tunnel_key_lens[type].len);
571 return -EINVAL;
574 switch (type) {
575 case OVS_TUNNEL_KEY_ATTR_ID:
576 SW_FLOW_KEY_PUT(match, tun_key.tun_id,
577 nla_get_be64(a), is_mask);
578 tun_flags |= TUNNEL_KEY;
579 break;
580 case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
581 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
582 nla_get_in_addr(a), is_mask);
583 ipv4 = true;
584 break;
585 case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
586 SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
587 nla_get_in_addr(a), is_mask);
588 ipv4 = true;
589 break;
590 case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
 591                         SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
592 nla_get_in6_addr(a), is_mask);
593 ipv6 = true;
594 break;
595 case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
596 SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
597 nla_get_in6_addr(a), is_mask);
598 ipv6 = true;
599 break;
600 case OVS_TUNNEL_KEY_ATTR_TOS:
601 SW_FLOW_KEY_PUT(match, tun_key.tos,
602 nla_get_u8(a), is_mask);
603 break;
604 case OVS_TUNNEL_KEY_ATTR_TTL:
605 SW_FLOW_KEY_PUT(match, tun_key.ttl,
606 nla_get_u8(a), is_mask);
607 ttl = true;
608 break;
609 case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
610 tun_flags |= TUNNEL_DONT_FRAGMENT;
611 break;
612 case OVS_TUNNEL_KEY_ATTR_CSUM:
613 tun_flags |= TUNNEL_CSUM;
614 break;
615 case OVS_TUNNEL_KEY_ATTR_TP_SRC:
616 SW_FLOW_KEY_PUT(match, tun_key.tp_src,
617 nla_get_be16(a), is_mask);
618 break;
619 case OVS_TUNNEL_KEY_ATTR_TP_DST:
620 SW_FLOW_KEY_PUT(match, tun_key.tp_dst,
621 nla_get_be16(a), is_mask);
622 break;
623 case OVS_TUNNEL_KEY_ATTR_OAM:
624 tun_flags |= TUNNEL_OAM;
625 break;
626 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
627 if (opts_type) {
628 OVS_NLERR(log, "Multiple metadata blocks provided");
629 return -EINVAL;
632 err = genev_tun_opt_from_nlattr(a, match, is_mask, log);
633 if (err)
634 return err;
636 tun_flags |= TUNNEL_GENEVE_OPT;
637 opts_type = type;
638 break;
639 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
640 if (opts_type) {
641 OVS_NLERR(log, "Multiple metadata blocks provided");
642 return -EINVAL;
645 err = vxlan_tun_opt_from_nlattr(a, match, is_mask, log);
646 if (err)
647 return err;
649 tun_flags |= TUNNEL_VXLAN_OPT;
650 opts_type = type;
651 break;
652 default:
653 OVS_NLERR(log, "Unknown IP tunnel attribute %d",
654 type);
655 return -EINVAL;
659 SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
660 if (is_mask)
661 SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
662 else
663 SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
664 false);
666 if (rem > 0) {
667 OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
668 rem);
669 return -EINVAL;
672 if (ipv4 && ipv6) {
673 OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
674 return -EINVAL;
677 if (!is_mask) {
678 if (!ipv4 && !ipv6) {
679 OVS_NLERR(log, "IP tunnel dst address not specified");
680 return -EINVAL;
682 if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
683 OVS_NLERR(log, "IPv4 tunnel dst address is zero");
684 return -EINVAL;
686 if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
687 OVS_NLERR(log, "IPv6 tunnel dst address is zero");
688 return -EINVAL;
691 if (!ttl) {
692 OVS_NLERR(log, "IP tunnel TTL not specified.");
693 return -EINVAL;
697 return opts_type;
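/* On success ip_tun_from_nlattr() above returns the attribute type of
 * the tunnel option block it consumed (GENEVE_OPTS or VXLAN_OPTS), or 0
 * if there were no options; a negative errno signals a parse failure.
 * validate_and_copy_set_tun() relies on this return value to pick the
 * right option validator.
 */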
700 static int vxlan_opt_to_nlattr(struct sk_buff *skb,
701 const void *tun_opts, int swkey_tun_opts_len)
703 const struct vxlan_metadata *opts = tun_opts;
704 struct nlattr *nla;
706 nla = nla_nest_start(skb, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
707 if (!nla)
708 return -EMSGSIZE;
710 if (nla_put_u32(skb, OVS_VXLAN_EXT_GBP, opts->gbp) < 0)
711 return -EMSGSIZE;
713 nla_nest_end(skb, nla);
714 return 0;
717 static int __ip_tun_to_nlattr(struct sk_buff *skb,
718 const struct ip_tunnel_key *output,
719 const void *tun_opts, int swkey_tun_opts_len,
720 unsigned short tun_proto)
722 if (output->tun_flags & TUNNEL_KEY &&
723 nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
724 return -EMSGSIZE;
725 switch (tun_proto) {
726 case AF_INET:
727 if (output->u.ipv4.src &&
728 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
729 output->u.ipv4.src))
730 return -EMSGSIZE;
731 if (output->u.ipv4.dst &&
732 nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
733 output->u.ipv4.dst))
734 return -EMSGSIZE;
735 break;
736 case AF_INET6:
737 if (!ipv6_addr_any(&output->u.ipv6.src) &&
738 nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
739 &output->u.ipv6.src))
740 return -EMSGSIZE;
741 if (!ipv6_addr_any(&output->u.ipv6.dst) &&
742 nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
743 &output->u.ipv6.dst))
744 return -EMSGSIZE;
745 break;
747 if (output->tos &&
748 nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
749 return -EMSGSIZE;
750 if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ttl))
751 return -EMSGSIZE;
752 if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
753 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
754 return -EMSGSIZE;
755 if ((output->tun_flags & TUNNEL_CSUM) &&
756 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
757 return -EMSGSIZE;
758 if (output->tp_src &&
759 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src))
760 return -EMSGSIZE;
761 if (output->tp_dst &&
762 nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst))
763 return -EMSGSIZE;
764 if ((output->tun_flags & TUNNEL_OAM) &&
765 nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
766 return -EMSGSIZE;
767 if (swkey_tun_opts_len) {
768 if (output->tun_flags & TUNNEL_GENEVE_OPT &&
769 nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
770 swkey_tun_opts_len, tun_opts))
771 return -EMSGSIZE;
772 else if (output->tun_flags & TUNNEL_VXLAN_OPT &&
773 vxlan_opt_to_nlattr(skb, tun_opts, swkey_tun_opts_len))
774 return -EMSGSIZE;
777 return 0;
780 static int ip_tun_to_nlattr(struct sk_buff *skb,
781 const struct ip_tunnel_key *output,
782 const void *tun_opts, int swkey_tun_opts_len,
783 unsigned short tun_proto)
785 struct nlattr *nla;
786 int err;
788 nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
789 if (!nla)
790 return -EMSGSIZE;
792 err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
793 tun_proto);
794 if (err)
795 return err;
797 nla_nest_end(skb, nla);
798 return 0;
801 int ovs_nla_put_tunnel_info(struct sk_buff *skb,
802 struct ip_tunnel_info *tun_info)
804 return __ip_tun_to_nlattr(skb, &tun_info->key,
805 ip_tunnel_info_opts(tun_info),
806 tun_info->options_len,
807 ip_tunnel_info_af(tun_info));
810 static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
811 u64 *attrs, const struct nlattr **a,
812 bool is_mask, bool log)
814 if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
815 u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
817 SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
818 *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
821 if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
822 u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
824 SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
825 *attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
828 if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
829 SW_FLOW_KEY_PUT(match, phy.priority,
830 nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
831 *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
834 if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
835 u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
837 if (is_mask) {
838 in_port = 0xffffffff; /* Always exact match in_port. */
839 } else if (in_port >= DP_MAX_PORTS) {
840 OVS_NLERR(log, "Port %d exceeds max allowable %d",
841 in_port, DP_MAX_PORTS);
842 return -EINVAL;
845 SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
846 *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
847 } else if (!is_mask) {
848 SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
851 if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
852 uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
854 SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
855 *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
857 if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
858 if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
859 is_mask, log) < 0)
860 return -EINVAL;
861 *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
864 if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
865 ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
866 u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
868 if (ct_state & ~CT_SUPPORTED_MASK) {
869 OVS_NLERR(log, "ct_state flags %08x unsupported",
870 ct_state);
871 return -EINVAL;
874 SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
875 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
877 if (*attrs & (1 << OVS_KEY_ATTR_CT_ZONE) &&
878 ovs_ct_verify(net, OVS_KEY_ATTR_CT_ZONE)) {
879 u16 ct_zone = nla_get_u16(a[OVS_KEY_ATTR_CT_ZONE]);
881 SW_FLOW_KEY_PUT(match, ct.zone, ct_zone, is_mask);
882 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_ZONE);
884 if (*attrs & (1 << OVS_KEY_ATTR_CT_MARK) &&
885 ovs_ct_verify(net, OVS_KEY_ATTR_CT_MARK)) {
886 u32 mark = nla_get_u32(a[OVS_KEY_ATTR_CT_MARK]);
888 SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
889 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
891 if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
892 ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
893 const struct ovs_key_ct_labels *cl;
895 cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
896 SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
897 sizeof(*cl), is_mask);
898 *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
900 return 0;
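/* metadata_from_nlattrs() above consumes only the attributes that
 * describe packet metadata rather than packet contents: datapath hash,
 * recirculation id, priority, input port, skb mark, the tunnel key and
 * the conntrack state/zone/mark/labels.  Each handled attribute has its
 * bit cleared in *attrs so the caller can tell what remains.
 */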
903 static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match,
904 u64 attrs, const struct nlattr **a,
905 bool is_mask, bool log)
907 int err;
909 err = metadata_from_nlattrs(net, match, &attrs, a, is_mask, log);
910 if (err)
911 return err;
913 if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
914 const struct ovs_key_ethernet *eth_key;
916 eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
917 SW_FLOW_KEY_MEMCPY(match, eth.src,
918 eth_key->eth_src, ETH_ALEN, is_mask);
919 SW_FLOW_KEY_MEMCPY(match, eth.dst,
920 eth_key->eth_dst, ETH_ALEN, is_mask);
921 attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
924 if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
925 __be16 tci;
927 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
928 if (!(tci & htons(VLAN_TAG_PRESENT))) {
929 if (is_mask)
930 OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.");
931 else
932 OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set.");
934 return -EINVAL;
937 SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
938 attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
941 if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
942 __be16 eth_type;
944 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
945 if (is_mask) {
946 /* Always exact match EtherType. */
947 eth_type = htons(0xffff);
948 } else if (!eth_proto_is_802_3(eth_type)) {
949 OVS_NLERR(log, "EtherType %x is less than min %x",
950 ntohs(eth_type), ETH_P_802_3_MIN);
951 return -EINVAL;
954 SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
955 attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
956 } else if (!is_mask) {
957 SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
960 if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
961 const struct ovs_key_ipv4 *ipv4_key;
963 ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
964 if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
965 OVS_NLERR(log, "IPv4 frag type %d is out of range max %d",
966 ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
967 return -EINVAL;
969 SW_FLOW_KEY_PUT(match, ip.proto,
970 ipv4_key->ipv4_proto, is_mask);
971 SW_FLOW_KEY_PUT(match, ip.tos,
972 ipv4_key->ipv4_tos, is_mask);
973 SW_FLOW_KEY_PUT(match, ip.ttl,
974 ipv4_key->ipv4_ttl, is_mask);
975 SW_FLOW_KEY_PUT(match, ip.frag,
976 ipv4_key->ipv4_frag, is_mask);
977 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
978 ipv4_key->ipv4_src, is_mask);
979 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
980 ipv4_key->ipv4_dst, is_mask);
981 attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
984 if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
985 const struct ovs_key_ipv6 *ipv6_key;
987 ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
988 if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
989 OVS_NLERR(log, "IPv6 frag type %d is out of range max %d",
990 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
991 return -EINVAL;
994 if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
995 OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x).\n",
996 ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
997 return -EINVAL;
1000 SW_FLOW_KEY_PUT(match, ipv6.label,
1001 ipv6_key->ipv6_label, is_mask);
1002 SW_FLOW_KEY_PUT(match, ip.proto,
1003 ipv6_key->ipv6_proto, is_mask);
1004 SW_FLOW_KEY_PUT(match, ip.tos,
1005 ipv6_key->ipv6_tclass, is_mask);
1006 SW_FLOW_KEY_PUT(match, ip.ttl,
1007 ipv6_key->ipv6_hlimit, is_mask);
1008 SW_FLOW_KEY_PUT(match, ip.frag,
1009 ipv6_key->ipv6_frag, is_mask);
1010 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
1011 ipv6_key->ipv6_src,
1012 sizeof(match->key->ipv6.addr.src),
1013 is_mask);
1014 SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
1015 ipv6_key->ipv6_dst,
1016 sizeof(match->key->ipv6.addr.dst),
1017 is_mask);
1019 attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
1022 if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
1023 const struct ovs_key_arp *arp_key;
1025 arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
1026 if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
1027 OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).",
1028 arp_key->arp_op);
1029 return -EINVAL;
1032 SW_FLOW_KEY_PUT(match, ipv4.addr.src,
1033 arp_key->arp_sip, is_mask);
1034 SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
1035 arp_key->arp_tip, is_mask);
1036 SW_FLOW_KEY_PUT(match, ip.proto,
1037 ntohs(arp_key->arp_op), is_mask);
1038 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
1039 arp_key->arp_sha, ETH_ALEN, is_mask);
1040 SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
1041 arp_key->arp_tha, ETH_ALEN, is_mask);
1043 attrs &= ~(1 << OVS_KEY_ATTR_ARP);
1046 if (attrs & (1 << OVS_KEY_ATTR_MPLS)) {
1047 const struct ovs_key_mpls *mpls_key;
1049 mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]);
1050 SW_FLOW_KEY_PUT(match, mpls.top_lse,
1051 mpls_key->mpls_lse, is_mask);
1053 attrs &= ~(1 << OVS_KEY_ATTR_MPLS);
1056 if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
1057 const struct ovs_key_tcp *tcp_key;
1059 tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
1060 SW_FLOW_KEY_PUT(match, tp.src, tcp_key->tcp_src, is_mask);
1061 SW_FLOW_KEY_PUT(match, tp.dst, tcp_key->tcp_dst, is_mask);
1062 attrs &= ~(1 << OVS_KEY_ATTR_TCP);
1065 if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
1066 SW_FLOW_KEY_PUT(match, tp.flags,
1067 nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
1068 is_mask);
1069 attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
1072 if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
1073 const struct ovs_key_udp *udp_key;
1075 udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
1076 SW_FLOW_KEY_PUT(match, tp.src, udp_key->udp_src, is_mask);
1077 SW_FLOW_KEY_PUT(match, tp.dst, udp_key->udp_dst, is_mask);
1078 attrs &= ~(1 << OVS_KEY_ATTR_UDP);
1081 if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
1082 const struct ovs_key_sctp *sctp_key;
1084 sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
1085 SW_FLOW_KEY_PUT(match, tp.src, sctp_key->sctp_src, is_mask);
1086 SW_FLOW_KEY_PUT(match, tp.dst, sctp_key->sctp_dst, is_mask);
1087 attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
1090 if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
1091 const struct ovs_key_icmp *icmp_key;
1093 icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
1094 SW_FLOW_KEY_PUT(match, tp.src,
1095 htons(icmp_key->icmp_type), is_mask);
1096 SW_FLOW_KEY_PUT(match, tp.dst,
1097 htons(icmp_key->icmp_code), is_mask);
1098 attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
1101 if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
1102 const struct ovs_key_icmpv6 *icmpv6_key;
1104 icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
1105 SW_FLOW_KEY_PUT(match, tp.src,
1106 htons(icmpv6_key->icmpv6_type), is_mask);
1107 SW_FLOW_KEY_PUT(match, tp.dst,
1108 htons(icmpv6_key->icmpv6_code), is_mask);
1109 attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
1112 if (attrs & (1 << OVS_KEY_ATTR_ND)) {
1113 const struct ovs_key_nd *nd_key;
1115 nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
1116 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
1117 nd_key->nd_target,
1118 sizeof(match->key->ipv6.nd.target),
1119 is_mask);
1120 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
1121 nd_key->nd_sll, ETH_ALEN, is_mask);
1122 SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
1123 nd_key->nd_tll, ETH_ALEN, is_mask);
1124 attrs &= ~(1 << OVS_KEY_ATTR_ND);
1127 if (attrs != 0) {
1128 OVS_NLERR(log, "Unknown key attributes %llx",
1129 (unsigned long long)attrs);
1130 return -EINVAL;
1133 return 0;
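/* ovs_key_from_nlattrs() above handles the remaining, packet-content
 * attributes (Ethernet, VLAN, EtherType, IPv4/IPv6, ARP, MPLS,
 * TCP/UDP/SCTP, ICMP/ICMPv6, ND).  Any attribute bit still set after
 * all handlers have run is unknown and the whole key is rejected with
 * -EINVAL.
 */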
1136 static void nlattr_set(struct nlattr *attr, u8 val,
1137 const struct ovs_len_tbl *tbl)
1139 struct nlattr *nla;
1140 int rem;
1142 /* The nlattr stream should already have been validated */
1143 nla_for_each_nested(nla, attr, rem) {
1144 if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
1145 if (tbl[nla_type(nla)].next)
1146 tbl = tbl[nla_type(nla)].next;
1147 nlattr_set(nla, val, tbl);
1148 } else {
1149 memset(nla_data(nla), val, nla_len(nla));
1152 if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1153 *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
1157 static void mask_set_nlattr(struct nlattr *attr, u8 val)
1159 nlattr_set(attr, val, ovs_key_lens);
1163 * ovs_nla_get_match - parses Netlink attributes into a flow key and
1164 * mask. In case the 'mask' is NULL, the flow is treated as exact match
1165 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
1166 * does not include any don't care bit.
1167 * @net: Used to determine per-namespace field support.
1168 * @match: receives the extracted flow match information.
1169 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1170  * sequence. The fields should be those of the packet that triggered the creation
1171 * of this flow.
1172 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
1173 * attribute specifies the mask field of the wildcarded flow.
1174 * @log: Boolean to allow kernel error logging. Normally true, but when
1175 * probing for feature compatibility this should be passed in as false to
1176 * suppress unnecessary error logging.
1178 int ovs_nla_get_match(struct net *net, struct sw_flow_match *match,
1179 const struct nlattr *nla_key,
1180 const struct nlattr *nla_mask,
1181 bool log)
1183 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1184 const struct nlattr *encap;
1185 struct nlattr *newmask = NULL;
1186 u64 key_attrs = 0;
1187 u64 mask_attrs = 0;
1188 bool encap_valid = false;
1189 int err;
1191 err = parse_flow_nlattrs(nla_key, a, &key_attrs, log);
1192 if (err)
1193 return err;
1195 if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
1196 (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
1197 (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
1198 __be16 tci;
1200 if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
1201 (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
1202 OVS_NLERR(log, "Invalid Vlan frame.");
1203 return -EINVAL;
1206 key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1207 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1208 encap = a[OVS_KEY_ATTR_ENCAP];
1209 key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
1210 encap_valid = true;
1212 if (tci & htons(VLAN_TAG_PRESENT)) {
1213 err = parse_flow_nlattrs(encap, a, &key_attrs, log);
1214 if (err)
1215 return err;
1216 } else if (!tci) {
1217 /* Corner case for truncated 802.1Q header. */
1218 if (nla_len(encap)) {
1219 OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute.");
1220 return -EINVAL;
1222 } else {
1223 OVS_NLERR(log, "Encap attr is set for non-VLAN frame");
1224 return -EINVAL;
1228 err = ovs_key_from_nlattrs(net, match, key_attrs, a, false, log);
1229 if (err)
1230 return err;
1232 if (match->mask) {
1233 if (!nla_mask) {
1234 /* Create an exact match mask. We need to set to 0xff
1235 * all the 'match->mask' fields that have been touched
1236 * in 'match->key'. We cannot simply memset
1237 * 'match->mask', because padding bytes and fields not
1238 * specified in 'match->key' should be left to 0.
1239 * Instead, we use a stream of netlink attributes,
1240 * copied from 'key' and set to 0xff.
1241 * ovs_key_from_nlattrs() will take care of filling
1242 * 'match->mask' appropriately.
1244 newmask = kmemdup(nla_key,
1245 nla_total_size(nla_len(nla_key)),
1246 GFP_KERNEL);
1247 if (!newmask)
1248 return -ENOMEM;
1250 mask_set_nlattr(newmask, 0xff);
1252 /* The userspace does not send tunnel attributes that
1253 * are 0, but we should not wildcard them nonetheless.
1255 if (match->key->tun_proto)
1256 SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
1257 0xff, true);
1259 nla_mask = newmask;
1262 err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log);
1263 if (err)
1264 goto free_newmask;
1266 /* Always match on tci. */
1267 SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
1269 if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
1270 __be16 eth_type = 0;
1271 __be16 tci = 0;
1273 if (!encap_valid) {
1274 OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame.");
1275 err = -EINVAL;
1276 goto free_newmask;
1279 mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
1280 if (a[OVS_KEY_ATTR_ETHERTYPE])
1281 eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
1283 if (eth_type == htons(0xffff)) {
1284 mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
1285 encap = a[OVS_KEY_ATTR_ENCAP];
1286 err = parse_flow_mask_nlattrs(encap, a,
1287 &mask_attrs, log);
1288 if (err)
1289 goto free_newmask;
1290 } else {
1291 OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).",
1292 ntohs(eth_type));
1293 err = -EINVAL;
1294 goto free_newmask;
1297 if (a[OVS_KEY_ATTR_VLAN])
1298 tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
1300 if (!(tci & htons(VLAN_TAG_PRESENT))) {
1301 OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).",
1302 ntohs(tci));
1303 err = -EINVAL;
1304 goto free_newmask;
1308 err = ovs_key_from_nlattrs(net, match, mask_attrs, a, true,
1309 log);
1310 if (err)
1311 goto free_newmask;
1314 if (!match_validate(match, key_attrs, mask_attrs, log))
1315 err = -EINVAL;
1317 free_newmask:
1318 kfree(newmask);
1319 return err;
1322 static size_t get_ufid_len(const struct nlattr *attr, bool log)
1324 size_t len;
1326 if (!attr)
1327 return 0;
1329 len = nla_len(attr);
1330 if (len < 1 || len > MAX_UFID_LENGTH) {
1331 OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)",
1332 nla_len(attr), MAX_UFID_LENGTH);
1333 return 0;
1336 return len;
1339 /* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID,
1340 * or false otherwise.
1342 bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr,
1343 bool log)
1345 sfid->ufid_len = get_ufid_len(attr, log);
1346 if (sfid->ufid_len)
1347 memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len);
1349 return sfid->ufid_len;
1352 int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid,
1353 const struct sw_flow_key *key, bool log)
1355 struct sw_flow_key *new_key;
1357 if (ovs_nla_get_ufid(sfid, ufid, log))
1358 return 0;
1360 /* If UFID was not provided, use unmasked key. */
1361 new_key = kmalloc(sizeof(*new_key), GFP_KERNEL);
1362 if (!new_key)
1363 return -ENOMEM;
1364 memcpy(new_key, key, sizeof(*key));
1365 sfid->unmasked_key = new_key;
1367 return 0;
1370 u32 ovs_nla_get_ufid_flags(const struct nlattr *attr)
1372 return attr ? nla_get_u32(attr) : 0;
1376 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
1377 * @key: Receives extracted in_port, priority, tun_key and skb_mark.
1378 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
1379 * sequence.
1380 * @log: Boolean to allow kernel error logging. Normally true, but when
1381 * probing for feature compatibility this should be passed in as false to
1382 * suppress unnecessary error logging.
1384 * This parses a series of Netlink attributes that form a flow key, which must
1385 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
1386 * get the metadata, that is, the parts of the flow key that cannot be
1387 * extracted from the packet itself.
1390 int ovs_nla_get_flow_metadata(struct net *net, const struct nlattr *attr,
1391 struct sw_flow_key *key,
1392 bool log)
1394 const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
1395 struct sw_flow_match match;
1396 u64 attrs = 0;
1397 int err;
1399 err = parse_flow_nlattrs(attr, a, &attrs, log);
1400 if (err)
1401 return -EINVAL;
1403 memset(&match, 0, sizeof(match));
1404 match.key = key;
1406 memset(&key->ct, 0, sizeof(key->ct));
1407 key->phy.in_port = DP_MAX_PORTS;
1409 return metadata_from_nlattrs(net, &match, &attrs, a, false, log);
1412 static int __ovs_nla_put_key(const struct sw_flow_key *swkey,
1413 const struct sw_flow_key *output, bool is_mask,
1414 struct sk_buff *skb)
1416 struct ovs_key_ethernet *eth_key;
1417 struct nlattr *nla, *encap;
1419 if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
1420 goto nla_put_failure;
1422 if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
1423 goto nla_put_failure;
1425 if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
1426 goto nla_put_failure;
1428 if ((swkey->tun_proto || is_mask)) {
1429 const void *opts = NULL;
1431 if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
1432 opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
1434 if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
1435 swkey->tun_opts_len, swkey->tun_proto))
1436 goto nla_put_failure;
1439 if (swkey->phy.in_port == DP_MAX_PORTS) {
1440 if (is_mask && (output->phy.in_port == 0xffff))
1441 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
1442 goto nla_put_failure;
1443 } else {
1444 u16 upper_u16;
1445 upper_u16 = !is_mask ? 0 : 0xffff;
1447 if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
1448 (upper_u16 << 16) | output->phy.in_port))
1449 goto nla_put_failure;
1452 if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
1453 goto nla_put_failure;
1455 if (ovs_ct_put_key(output, skb))
1456 goto nla_put_failure;
1458 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1459 if (!nla)
1460 goto nla_put_failure;
1462 eth_key = nla_data(nla);
1463 ether_addr_copy(eth_key->eth_src, output->eth.src);
1464 ether_addr_copy(eth_key->eth_dst, output->eth.dst);
1466 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
1467 __be16 eth_type;
1468 eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
1469 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
1470 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
1471 goto nla_put_failure;
1472 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
1473 if (!swkey->eth.tci)
1474 goto unencap;
1475 } else
1476 encap = NULL;
1478 if (swkey->eth.type == htons(ETH_P_802_2)) {
1480 * Ethertype 802.2 is represented in the netlink with omitted
1481 * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and
1482 * 0xffff in the mask attribute. Ethertype can also
1483 * be wildcarded.
1485 if (is_mask && output->eth.type)
1486 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
1487 output->eth.type))
1488 goto nla_put_failure;
1489 goto unencap;
1492 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
1493 goto nla_put_failure;
1495 if (swkey->eth.type == htons(ETH_P_IP)) {
1496 struct ovs_key_ipv4 *ipv4_key;
1498 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
1499 if (!nla)
1500 goto nla_put_failure;
1501 ipv4_key = nla_data(nla);
1502 ipv4_key->ipv4_src = output->ipv4.addr.src;
1503 ipv4_key->ipv4_dst = output->ipv4.addr.dst;
1504 ipv4_key->ipv4_proto = output->ip.proto;
1505 ipv4_key->ipv4_tos = output->ip.tos;
1506 ipv4_key->ipv4_ttl = output->ip.ttl;
1507 ipv4_key->ipv4_frag = output->ip.frag;
1508 } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
1509 struct ovs_key_ipv6 *ipv6_key;
1511 nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
1512 if (!nla)
1513 goto nla_put_failure;
1514 ipv6_key = nla_data(nla);
1515 memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
1516 sizeof(ipv6_key->ipv6_src));
1517 memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
1518 sizeof(ipv6_key->ipv6_dst));
1519 ipv6_key->ipv6_label = output->ipv6.label;
1520 ipv6_key->ipv6_proto = output->ip.proto;
1521 ipv6_key->ipv6_tclass = output->ip.tos;
1522 ipv6_key->ipv6_hlimit = output->ip.ttl;
1523 ipv6_key->ipv6_frag = output->ip.frag;
1524 } else if (swkey->eth.type == htons(ETH_P_ARP) ||
1525 swkey->eth.type == htons(ETH_P_RARP)) {
1526 struct ovs_key_arp *arp_key;
1528 nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
1529 if (!nla)
1530 goto nla_put_failure;
1531 arp_key = nla_data(nla);
1532 memset(arp_key, 0, sizeof(struct ovs_key_arp));
1533 arp_key->arp_sip = output->ipv4.addr.src;
1534 arp_key->arp_tip = output->ipv4.addr.dst;
1535 arp_key->arp_op = htons(output->ip.proto);
1536 ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha);
1537 ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha);
1538 } else if (eth_p_mpls(swkey->eth.type)) {
1539 struct ovs_key_mpls *mpls_key;
1541 nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key));
1542 if (!nla)
1543 goto nla_put_failure;
1544 mpls_key = nla_data(nla);
1545 mpls_key->mpls_lse = output->mpls.top_lse;
1548 if ((swkey->eth.type == htons(ETH_P_IP) ||
1549 swkey->eth.type == htons(ETH_P_IPV6)) &&
1550 swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
1552 if (swkey->ip.proto == IPPROTO_TCP) {
1553 struct ovs_key_tcp *tcp_key;
1555 nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
1556 if (!nla)
1557 goto nla_put_failure;
1558 tcp_key = nla_data(nla);
1559 tcp_key->tcp_src = output->tp.src;
1560 tcp_key->tcp_dst = output->tp.dst;
1561 if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS,
1562 output->tp.flags))
1563 goto nla_put_failure;
1564 } else if (swkey->ip.proto == IPPROTO_UDP) {
1565 struct ovs_key_udp *udp_key;
1567 nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
1568 if (!nla)
1569 goto nla_put_failure;
1570 udp_key = nla_data(nla);
1571 udp_key->udp_src = output->tp.src;
1572 udp_key->udp_dst = output->tp.dst;
1573 } else if (swkey->ip.proto == IPPROTO_SCTP) {
1574 struct ovs_key_sctp *sctp_key;
1576 nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
1577 if (!nla)
1578 goto nla_put_failure;
1579 sctp_key = nla_data(nla);
1580 sctp_key->sctp_src = output->tp.src;
1581 sctp_key->sctp_dst = output->tp.dst;
1582 } else if (swkey->eth.type == htons(ETH_P_IP) &&
1583 swkey->ip.proto == IPPROTO_ICMP) {
1584 struct ovs_key_icmp *icmp_key;
1586 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
1587 if (!nla)
1588 goto nla_put_failure;
1589 icmp_key = nla_data(nla);
1590 icmp_key->icmp_type = ntohs(output->tp.src);
1591 icmp_key->icmp_code = ntohs(output->tp.dst);
1592 } else if (swkey->eth.type == htons(ETH_P_IPV6) &&
1593 swkey->ip.proto == IPPROTO_ICMPV6) {
1594 struct ovs_key_icmpv6 *icmpv6_key;
1596 nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
1597 sizeof(*icmpv6_key));
1598 if (!nla)
1599 goto nla_put_failure;
1600 icmpv6_key = nla_data(nla);
1601 icmpv6_key->icmpv6_type = ntohs(output->tp.src);
1602 icmpv6_key->icmpv6_code = ntohs(output->tp.dst);
1604 if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
1605 icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
1606 struct ovs_key_nd *nd_key;
1608 nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
1609 if (!nla)
1610 goto nla_put_failure;
1611 nd_key = nla_data(nla);
1612 memcpy(nd_key->nd_target, &output->ipv6.nd.target,
1613 sizeof(nd_key->nd_target));
1614 ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
1615 ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
1620 unencap:
1621 if (encap)
1622 nla_nest_end(skb, encap);
1624 return 0;
1626 nla_put_failure:
1627 return -EMSGSIZE;
1630 int ovs_nla_put_key(const struct sw_flow_key *swkey,
1631 const struct sw_flow_key *output, int attr, bool is_mask,
1632 struct sk_buff *skb)
1634 int err;
1635 struct nlattr *nla;
1637 nla = nla_nest_start(skb, attr);
1638 if (!nla)
1639 return -EMSGSIZE;
1640 err = __ovs_nla_put_key(swkey, output, is_mask, skb);
1641 if (err)
1642 return err;
1643 nla_nest_end(skb, nla);
1645 return 0;
1648 /* Called with ovs_mutex or RCU read lock. */
1649 int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb)
1651 if (ovs_identifier_is_ufid(&flow->id))
1652 return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len,
1653 flow->id.ufid);
1655 return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key,
1656 OVS_FLOW_ATTR_KEY, false, skb);
1659 /* Called with ovs_mutex or RCU read lock. */
1660 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb)
1662 return ovs_nla_put_key(&flow->key, &flow->key,
1663 OVS_FLOW_ATTR_KEY, false, skb);
1666 /* Called with ovs_mutex or RCU read lock. */
1667 int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
1669 return ovs_nla_put_key(&flow->key, &flow->mask->key,
1670 OVS_FLOW_ATTR_MASK, true, skb);
1673 #define MAX_ACTIONS_BUFSIZE (32 * 1024)
1675 static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
1677 struct sw_flow_actions *sfa;
1679 if (size > MAX_ACTIONS_BUFSIZE) {
1680 OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
1681 return ERR_PTR(-EINVAL);
1684 sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
1685 if (!sfa)
1686 return ERR_PTR(-ENOMEM);
1688 sfa->actions_len = 0;
1689 return sfa;
1692 static void ovs_nla_free_set_action(const struct nlattr *a)
1694 const struct nlattr *ovs_key = nla_data(a);
1695 struct ovs_tunnel_info *ovs_tun;
1697 switch (nla_type(ovs_key)) {
1698 case OVS_KEY_ATTR_TUNNEL_INFO:
1699 ovs_tun = nla_data(ovs_key);
1700 dst_release((struct dst_entry *)ovs_tun->tun_dst);
1701 break;
1705 void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
1707 const struct nlattr *a;
1708 int rem;
1710 if (!sf_acts)
1711 return;
1713 nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
1714 switch (nla_type(a)) {
1715 case OVS_ACTION_ATTR_SET:
1716 ovs_nla_free_set_action(a);
1717 break;
1718 case OVS_ACTION_ATTR_CT:
1719 ovs_ct_free_action(a);
1720 break;
1724 kfree(sf_acts);
1727 static void __ovs_nla_free_flow_actions(struct rcu_head *head)
1729 ovs_nla_free_flow_actions(container_of(head, struct sw_flow_actions, rcu));
1732 /* Schedules 'sf_acts' to be freed after the next RCU grace period.
1733 * The caller must hold rcu_read_lock for this to be sensible. */
1734 void ovs_nla_free_flow_actions_rcu(struct sw_flow_actions *sf_acts)
1736 call_rcu(&sf_acts->rcu, __ovs_nla_free_flow_actions);
1739 static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1740 int attr_len, bool log)
1743 struct sw_flow_actions *acts;
1744 int new_acts_size;
1745 int req_size = NLA_ALIGN(attr_len);
1746 int next_offset = offsetof(struct sw_flow_actions, actions) +
1747 (*sfa)->actions_len;
1749 if (req_size <= (ksize(*sfa) - next_offset))
1750 goto out;
1752 new_acts_size = ksize(*sfa) * 2;
1754 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1755 if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
1756 return ERR_PTR(-EMSGSIZE);
1757 new_acts_size = MAX_ACTIONS_BUFSIZE;
1760 acts = nla_alloc_flow_actions(new_acts_size, log);
1761 if (IS_ERR(acts))
1762 return (void *)acts;
1764 memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
1765 acts->actions_len = (*sfa)->actions_len;
1766 acts->orig_len = (*sfa)->orig_len;
1767 kfree(*sfa);
1768 *sfa = acts;
1770 out:
1771 (*sfa)->actions_len += req_size;
1772 return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
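/* reserve_sfa_size() above grows the flow action buffer geometrically:
 * if the kmalloc'd area (as reported by ksize()) cannot hold the new
 * attribute it doubles the allocation, capped at MAX_ACTIONS_BUFSIZE,
 * copies the existing actions over and returns a pointer at the old
 * end of the buffer where the caller writes the new attribute.
 */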
1775 static struct nlattr *__add_action(struct sw_flow_actions **sfa,
1776 int attrtype, void *data, int len, bool log)
1778 struct nlattr *a;
1780 a = reserve_sfa_size(sfa, nla_attr_size(len), log);
1781 if (IS_ERR(a))
1782 return a;
1784 a->nla_type = attrtype;
1785 a->nla_len = nla_attr_size(len);
1787 if (data)
1788 memcpy(nla_data(a), data, len);
1789 memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
1791 return a;
1794 int ovs_nla_add_action(struct sw_flow_actions **sfa, int attrtype, void *data,
1795 int len, bool log)
1797 struct nlattr *a;
1799 a = __add_action(sfa, attrtype, data, len, log);
1801 return PTR_ERR_OR_ZERO(a);
1804 static inline int add_nested_action_start(struct sw_flow_actions **sfa,
1805 int attrtype, bool log)
1807 int used = (*sfa)->actions_len;
1808 int err;
1810 err = ovs_nla_add_action(sfa, attrtype, NULL, 0, log);
1811 if (err)
1812 return err;
1814 return used;
1817 static inline void add_nested_action_end(struct sw_flow_actions *sfa,
1818 int st_offset)
1820 struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions +
1821 st_offset);
1823 a->nla_len = sfa->actions_len - st_offset;
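/* add_nested_action_start()/add_nested_action_end() above bracket a
 * nested attribute while actions are being copied: the start helper
 * emits a zero-length header and remembers its offset, the end helper
 * patches nla_len once the children are in place.  A minimal sketch of
 * the pattern (as used by validate_and_copy_sample() below):
 *
 *	start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
 *	if (start < 0)
 *		return start;
 *	... add child attributes with ovs_nla_add_action() ...
 *	add_nested_action_end(*sfa, start);
 */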
1826 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
1827 const struct sw_flow_key *key,
1828 int depth, struct sw_flow_actions **sfa,
1829 __be16 eth_type, __be16 vlan_tci, bool log);
1831 static int validate_and_copy_sample(struct net *net, const struct nlattr *attr,
1832 const struct sw_flow_key *key, int depth,
1833 struct sw_flow_actions **sfa,
1834 __be16 eth_type, __be16 vlan_tci, bool log)
1836 const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
1837 const struct nlattr *probability, *actions;
1838 const struct nlattr *a;
1839 int rem, start, err, st_acts;
1841 memset(attrs, 0, sizeof(attrs));
1842 nla_for_each_nested(a, attr, rem) {
1843 int type = nla_type(a);
1844 if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
1845 return -EINVAL;
1846 attrs[type] = a;
1848 if (rem)
1849 return -EINVAL;
1851 probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
1852 if (!probability || nla_len(probability) != sizeof(u32))
1853 return -EINVAL;
1855 actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
1856 if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
1857 return -EINVAL;
1859 /* validation done, copy sample action. */
1860 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log);
1861 if (start < 0)
1862 return start;
1863 err = ovs_nla_add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY,
1864 nla_data(probability), sizeof(u32), log);
1865 if (err)
1866 return err;
1867 st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log);
1868 if (st_acts < 0)
1869 return st_acts;
1871 err = __ovs_nla_copy_actions(net, actions, key, depth + 1, sfa,
1872 eth_type, vlan_tci, log);
1873 if (err)
1874 return err;
1876 add_nested_action_end(*sfa, st_acts);
1877 add_nested_action_end(*sfa, start);
1879 return 0;
1882 void ovs_match_init(struct sw_flow_match *match,
1883 struct sw_flow_key *key,
1884 struct sw_flow_mask *mask)
1886 memset(match, 0, sizeof(*match));
1887 match->key = key;
1888 match->mask = mask;
1890 memset(key, 0, sizeof(*key));
1892 if (mask) {
1893 memset(&mask->key, 0, sizeof(mask->key));
1894 mask->range.start = mask->range.end = 0;
1898 static int validate_geneve_opts(struct sw_flow_key *key)
1900 struct geneve_opt *option;
1901 int opts_len = key->tun_opts_len;
1902 bool crit_opt = false;
1904 option = (struct geneve_opt *)TUN_METADATA_OPTS(key, key->tun_opts_len);
1905 while (opts_len > 0) {
1906 int len;
1908 if (opts_len < sizeof(*option))
1909 return -EINVAL;
1911 len = sizeof(*option) + option->length * 4;
1912 if (len > opts_len)
1913 return -EINVAL;
1915 crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
1917 option = (struct geneve_opt *)((u8 *)option + len);
1918 opts_len -= len;
1921 key->tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
1923 return 0;
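As a worked example of the stride computed above (values illustrative; the Geneve 'length' field counts 4-byte words of option data, not bytes):

/* Illustration only: for option->length == 2 the option carries 8 bytes of
 * data, so the walk advances len = sizeof(struct geneve_opt) + 2 * 4 and
 * opts_len shrinks by the same amount.
 */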
1926 static int validate_and_copy_set_tun(const struct nlattr *attr,
1927 struct sw_flow_actions **sfa, bool log)
1929 struct sw_flow_match match;
1930 struct sw_flow_key key;
1931 struct metadata_dst *tun_dst;
1932 struct ip_tunnel_info *tun_info;
1933 struct ovs_tunnel_info *ovs_tun;
1934 struct nlattr *a;
1935 int err = 0, start, opts_type;
1937 ovs_match_init(&match, &key, NULL);
1938 opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
1939 if (opts_type < 0)
1940 return opts_type;
1942 if (key.tun_opts_len) {
1943 switch (opts_type) {
1944 case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
1945 err = validate_geneve_opts(&key);
1946 if (err < 0)
1947 return err;
1948 break;
1949 case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS:
1950 break;
1954 start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log);
1955 if (start < 0)
1956 return start;
1958 tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
1959 if (!tun_dst)
1960 return -ENOMEM;
1962 err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL);
1963 if (err) {
1964 dst_release((struct dst_entry *)tun_dst);
1965 return err;
1968 a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
1969 sizeof(*ovs_tun), log);
1970 if (IS_ERR(a)) {
1971 dst_release((struct dst_entry *)tun_dst);
1972 return PTR_ERR(a);
1975 ovs_tun = nla_data(a);
1976 ovs_tun->tun_dst = tun_dst;
1978 tun_info = &tun_dst->u.tun_info;
1979 tun_info->mode = IP_TUNNEL_INFO_TX;
1980 if (key.tun_proto == AF_INET6)
1981 tun_info->mode |= IP_TUNNEL_INFO_IPV6;
1982 tun_info->key = key.tun_key;
1984 /* We need to store the options in the action itself since
1985 * everything else will go away after flow setup. We can append
1986 * them to tun_info and then point there.
1988 ip_tunnel_info_opts_set(tun_info,
1989 TUN_METADATA_OPTS(&key, key.tun_opts_len),
1990 key.tun_opts_len);
1991 add_nested_action_end(*sfa, start);
1993 return err;
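A hedged sketch of what this leaves behind: the tunnel attributes from userspace are replaced by a kernel-internal attribute whose payload owns the metadata dst, with any tunnel options copied into the space that metadata_dst_alloc() reserved after the embedded ip_tunnel_info:

/* Illustration only: internal encoding of a tunnel set action.
 *
 *   OVS_ACTION_ATTR_SET
 *     OVS_KEY_ATTR_TUNNEL_INFO
 *       struct ovs_tunnel_info { .tun_dst = <metadata_dst + options> }
 *
 * set_action_to_attr() below translates this back into
 * OVS_ACTION_ATTR_SET(OVS_KEY_ATTR_TUNNEL...) when the flow is dumped.
 */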
1996 /* Return false if there are any non-masked bits set.
1997 * Mask follows data immediately, before any netlink padding.
1999 static bool validate_masked(u8 *data, int len)
2001 u8 *mask = data + len;
2003 while (len--)
2004 if (*data++ & ~*mask++)
2005 return false;
2007 return true;
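A concrete (made-up) example of the convention this checks: the caller passes the value half of a masked set payload, and the mask half is assumed to sit immediately after it, as it does for OVS_ACTION_ATTR_SET_MASKED below.

/* Illustration only: value followed by mask for a 4-byte field.
 *
 *   u8 buf[8] = { 0x00, 0x00, 0x00, 0x03,     value
 *                 0x00, 0x00, 0x00, 0x01 };   mask
 *
 * validate_masked(buf, 4) returns false, because value bit 0x02 is set
 * outside the mask.
 */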
2010 static int validate_set(const struct nlattr *a,
2011 const struct sw_flow_key *flow_key,
2012 struct sw_flow_actions **sfa,
2013 bool *skip_copy, __be16 eth_type, bool masked, bool log)
2015 const struct nlattr *ovs_key = nla_data(a);
2016 int key_type = nla_type(ovs_key);
2017 size_t key_len;
2019 /* There can be only one key in an action */
2020 if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
2021 return -EINVAL;
2023 key_len = nla_len(ovs_key);
2024 if (masked)
2025 key_len /= 2;
2027 if (key_type > OVS_KEY_ATTR_MAX ||
2028 !check_attr_len(key_len, ovs_key_lens[key_type].len))
2029 return -EINVAL;
2031 if (masked && !validate_masked(nla_data(ovs_key), key_len))
2032 return -EINVAL;
2034 switch (key_type) {
2035 const struct ovs_key_ipv4 *ipv4_key;
2036 const struct ovs_key_ipv6 *ipv6_key;
2037 int err;
2039 case OVS_KEY_ATTR_PRIORITY:
2040 case OVS_KEY_ATTR_SKB_MARK:
2041 case OVS_KEY_ATTR_CT_MARK:
2042 case OVS_KEY_ATTR_CT_LABELS:
2043 case OVS_KEY_ATTR_ETHERNET:
2044 break;
2046 case OVS_KEY_ATTR_TUNNEL:
2047 if (masked)
2048 return -EINVAL; /* Masked tunnel set not supported. */
2050 *skip_copy = true;
2051 err = validate_and_copy_set_tun(a, sfa, log);
2052 if (err)
2053 return err;
2054 break;
2056 case OVS_KEY_ATTR_IPV4:
2057 if (eth_type != htons(ETH_P_IP))
2058 return -EINVAL;
2060 ipv4_key = nla_data(ovs_key);
2062 if (masked) {
2063 const struct ovs_key_ipv4 *mask = ipv4_key + 1;
2065 /* Non-writeable fields. */
2066 if (mask->ipv4_proto || mask->ipv4_frag)
2067 return -EINVAL;
2068 } else {
2069 if (ipv4_key->ipv4_proto != flow_key->ip.proto)
2070 return -EINVAL;
2072 if (ipv4_key->ipv4_frag != flow_key->ip.frag)
2073 return -EINVAL;
2075 break;
2077 case OVS_KEY_ATTR_IPV6:
2078 if (eth_type != htons(ETH_P_IPV6))
2079 return -EINVAL;
2081 ipv6_key = nla_data(ovs_key);
2083 if (masked) {
2084 const struct ovs_key_ipv6 *mask = ipv6_key + 1;
2086 /* Non-writeable fields. */
2087 if (mask->ipv6_proto || mask->ipv6_frag)
2088 return -EINVAL;
2090 /* Invalid bits in the flow label mask? */
2091 if (ntohl(mask->ipv6_label) & 0xFFF00000)
2092 return -EINVAL;
2093 } else {
2094 if (ipv6_key->ipv6_proto != flow_key->ip.proto)
2095 return -EINVAL;
2097 if (ipv6_key->ipv6_frag != flow_key->ip.frag)
2098 return -EINVAL;
2100 if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
2101 return -EINVAL;
2103 break;
2105 case OVS_KEY_ATTR_TCP:
2106 if ((eth_type != htons(ETH_P_IP) &&
2107 eth_type != htons(ETH_P_IPV6)) ||
2108 flow_key->ip.proto != IPPROTO_TCP)
2109 return -EINVAL;
2111 break;
2113 case OVS_KEY_ATTR_UDP:
2114 if ((eth_type != htons(ETH_P_IP) &&
2115 eth_type != htons(ETH_P_IPV6)) ||
2116 flow_key->ip.proto != IPPROTO_UDP)
2117 return -EINVAL;
2119 break;
2121 case OVS_KEY_ATTR_MPLS:
2122 if (!eth_p_mpls(eth_type))
2123 return -EINVAL;
2124 break;
2126 case OVS_KEY_ATTR_SCTP:
2127 if ((eth_type != htons(ETH_P_IP) &&
2128 eth_type != htons(ETH_P_IPV6)) ||
2129 flow_key->ip.proto != IPPROTO_SCTP)
2130 return -EINVAL;
2132 break;
2134 default:
2135 return -EINVAL;
2138 /* Convert non-masked non-tunnel set actions to masked set actions. */
2139 if (!masked && key_type != OVS_KEY_ATTR_TUNNEL) {
2140 int start, len = key_len * 2;
2141 struct nlattr *at;
2143 *skip_copy = true;
2145 start = add_nested_action_start(sfa,
2146 OVS_ACTION_ATTR_SET_TO_MASKED,
2147 log);
2148 if (start < 0)
2149 return start;
2151 at = __add_action(sfa, key_type, NULL, len, log);
2152 if (IS_ERR(at))
2153 return PTR_ERR(at);
2155 memcpy(nla_data(at), nla_data(ovs_key), key_len); /* Key. */
2156 memset(nla_data(at) + key_len, 0xff, key_len); /* Mask. */
2157 /* Clear non-writeable bits from otherwise writeable fields. */
2158 if (key_type == OVS_KEY_ATTR_IPV6) {
2159 struct ovs_key_ipv6 *mask = nla_data(at) + key_len;
2161 mask->ipv6_label &= htonl(0x000FFFFF);
2163 add_nested_action_end(*sfa, start);
2166 return 0;
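A hedged sketch of the conversion performed just above for a non-masked, non-tunnel set (the SKB mark value is arbitrary): userspace hands in a plain set, and the kernel stores it as an all-ones-masked set so the execution path only has to handle one form.

/* Illustration only:
 *
 *   from userspace:  OVS_ACTION_ATTR_SET(OVS_KEY_ATTR_SKB_MARK = 0x1)
 *   stored as:       OVS_ACTION_ATTR_SET_TO_MASKED(
 *                        OVS_KEY_ATTR_SKB_MARK = { value 0x1, mask 0xffffffff })
 *
 * masked_set_action_to_set_action_attr() below reverses this when the
 * actions are dumped back to userspace.
 */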
2169 static int validate_userspace(const struct nlattr *attr)
2171 static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
2172 [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
2173 [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
2174 [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 },
2176 struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
2177 int error;
2179 error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
2180 attr, userspace_policy);
2181 if (error)
2182 return error;
2184 if (!a[OVS_USERSPACE_ATTR_PID] ||
2185 !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
2186 return -EINVAL;
2188 return 0;
2191 static int copy_action(const struct nlattr *from,
2192 struct sw_flow_actions **sfa, bool log)
2194 int totlen = NLA_ALIGN(from->nla_len);
2195 struct nlattr *to;
2197 to = reserve_sfa_size(sfa, from->nla_len, log);
2198 if (IS_ERR(to))
2199 return PTR_ERR(to);
2201 memcpy(to, from, totlen);
2202 return 0;
2205 static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2206 const struct sw_flow_key *key,
2207 int depth, struct sw_flow_actions **sfa,
2208 __be16 eth_type, __be16 vlan_tci, bool log)
2210 const struct nlattr *a;
2211 int rem, err;
2213 if (depth >= SAMPLE_ACTION_DEPTH)
2214 return -EOVERFLOW;
2216 nla_for_each_nested(a, attr, rem) {
2217 /* Expected argument lengths, (u32)-1 for variable length. */
2218 static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
2219 [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
2220 [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
2221 [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
2222 [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls),
2223 [OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16),
2224 [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
2225 [OVS_ACTION_ATTR_POP_VLAN] = 0,
2226 [OVS_ACTION_ATTR_SET] = (u32)-1,
2227 [OVS_ACTION_ATTR_SET_MASKED] = (u32)-1,
2228 [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
2229 [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash),
2230 [OVS_ACTION_ATTR_CT] = (u32)-1,
2232 const struct ovs_action_push_vlan *vlan;
2233 int type = nla_type(a);
2234 bool skip_copy;
2236 if (type > OVS_ACTION_ATTR_MAX ||
2237 (action_lens[type] != nla_len(a) &&
2238 action_lens[type] != (u32)-1))
2239 return -EINVAL;
2241 skip_copy = false;
2242 switch (type) {
2243 case OVS_ACTION_ATTR_UNSPEC:
2244 return -EINVAL;
2246 case OVS_ACTION_ATTR_USERSPACE:
2247 err = validate_userspace(a);
2248 if (err)
2249 return err;
2250 break;
2252 case OVS_ACTION_ATTR_OUTPUT:
2253 if (nla_get_u32(a) >= DP_MAX_PORTS)
2254 return -EINVAL;
2255 break;
2257 case OVS_ACTION_ATTR_HASH: {
2258 const struct ovs_action_hash *act_hash = nla_data(a);
2260 switch (act_hash->hash_alg) {
2261 case OVS_HASH_ALG_L4:
2262 break;
2263 default:
2264 return -EINVAL;
2267 break;
2270 case OVS_ACTION_ATTR_POP_VLAN:
2271 vlan_tci = htons(0);
2272 break;
2274 case OVS_ACTION_ATTR_PUSH_VLAN:
2275 vlan = nla_data(a);
2276 if (vlan->vlan_tpid != htons(ETH_P_8021Q))
2277 return -EINVAL;
2278 if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
2279 return -EINVAL;
2280 vlan_tci = vlan->vlan_tci;
2281 break;
2283 case OVS_ACTION_ATTR_RECIRC:
2284 break;
2286 case OVS_ACTION_ATTR_PUSH_MPLS: {
2287 const struct ovs_action_push_mpls *mpls = nla_data(a);
2289 if (!eth_p_mpls(mpls->mpls_ethertype))
2290 return -EINVAL;
2291 /* Prohibit MPLS push except onto a whitelist of ethertypes,
2292 * so that the tag order of the packet is known.
2294 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2295 (eth_type != htons(ETH_P_IP) &&
2296 eth_type != htons(ETH_P_IPV6) &&
2297 eth_type != htons(ETH_P_ARP) &&
2298 eth_type != htons(ETH_P_RARP) &&
2299 !eth_p_mpls(eth_type)))
2300 return -EINVAL;
2301 eth_type = mpls->mpls_ethertype;
2302 break;
2305 case OVS_ACTION_ATTR_POP_MPLS:
2306 if (vlan_tci & htons(VLAN_TAG_PRESENT) ||
2307 !eth_p_mpls(eth_type))
2308 return -EINVAL;
2310 /* Disallow subsequent L2.5+ set and mpls_pop actions
2311 * as there is no check here to ensure that the new
2312 * eth_type is valid and thus set actions could
2313 * write off the end of the packet or otherwise
2314 * corrupt it.
2316 * Support for these actions is planned using packet
2317 * recirculation.
2319 eth_type = htons(0);
2320 break;
2322 case OVS_ACTION_ATTR_SET:
2323 err = validate_set(a, key, sfa,
2324 &skip_copy, eth_type, false, log);
2325 if (err)
2326 return err;
2327 break;
2329 case OVS_ACTION_ATTR_SET_MASKED:
2330 err = validate_set(a, key, sfa,
2331 &skip_copy, eth_type, true, log);
2332 if (err)
2333 return err;
2334 break;
2336 case OVS_ACTION_ATTR_SAMPLE:
2337 err = validate_and_copy_sample(net, a, key, depth, sfa,
2338 eth_type, vlan_tci, log);
2339 if (err)
2340 return err;
2341 skip_copy = true;
2342 break;
2344 case OVS_ACTION_ATTR_CT:
2345 err = ovs_ct_copy_action(net, a, key, sfa, log);
2346 if (err)
2347 return err;
2348 skip_copy = true;
2349 break;
2351 default:
2352 OVS_NLERR(log, "Unknown Action type %d", type);
2353 return -EINVAL;
2355 if (!skip_copy) {
2356 err = copy_action(a, sfa, log);
2357 if (err)
2358 return err;
2362 if (rem > 0)
2363 return -EINVAL;
2365 return 0;
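As a worked example of the length check at the top of the loop (payload sizes illustrative): a fixed-size entry in action_lens[] must match nla_len(a) exactly, while entries marked (u32)-1 defer to their own validators.

/* Illustration only:
 *   OVS_ACTION_ATTR_OUTPUT with nla_len(a) == 4              -> accepted
 *   OVS_ACTION_ATTR_OUTPUT with nla_len(a) == 8              -> -EINVAL
 *   OVS_ACTION_ATTR_USERSPACE, any length ((u32)-1 in table) -> checked by
 *                                                               validate_userspace()
 */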
2368 /* 'key' must be the masked key. */
2369 int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
2370 const struct sw_flow_key *key,
2371 struct sw_flow_actions **sfa, bool log)
2373 int err;
2375 *sfa = nla_alloc_flow_actions(nla_len(attr), log);
2376 if (IS_ERR(*sfa))
2377 return PTR_ERR(*sfa);
2379 (*sfa)->orig_len = nla_len(attr);
2380 err = __ovs_nla_copy_actions(net, attr, key, 0, sfa, key->eth.type,
2381 key->eth.tci, log);
2382 if (err)
2383 ovs_nla_free_flow_actions(*sfa);
2385 return err;
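A minimal, hedged sketch of the calling convention (the real caller lives elsewhere in the datapath code; the function and variable names here are made up): *sfa is allocated here and already freed again on failure, so the caller only releases it on the success path.

/* Illustration only: typical flow-install call sequence. */
static int example_install_actions(struct net *net,
				   const struct nlattr *acts_attr,
				   const struct sw_flow_key *masked_key,
				   bool log)
{
	struct sw_flow_actions *acts;
	int err;

	err = ovs_nla_copy_actions(net, acts_attr, masked_key, &acts, log);
	if (err)
		return err;	/* 'acts' has already been freed */

	/* ... attach 'acts' to the flow; free with ovs_nla_free_flow_actions()
	 * when the flow is destroyed ...
	 */
	return 0;
}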
2388 static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
2390 const struct nlattr *a;
2391 struct nlattr *start;
2392 int err = 0, rem;
2394 start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
2395 if (!start)
2396 return -EMSGSIZE;
2398 nla_for_each_nested(a, attr, rem) {
2399 int type = nla_type(a);
2400 struct nlattr *st_sample;
2402 switch (type) {
2403 case OVS_SAMPLE_ATTR_PROBABILITY:
2404 if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY,
2405 sizeof(u32), nla_data(a)))
2406 return -EMSGSIZE;
2407 break;
2408 case OVS_SAMPLE_ATTR_ACTIONS:
2409 st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
2410 if (!st_sample)
2411 return -EMSGSIZE;
2412 err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb);
2413 if (err)
2414 return err;
2415 nla_nest_end(skb, st_sample);
2416 break;
2420 nla_nest_end(skb, start);
2421 return err;
2424 static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
2426 const struct nlattr *ovs_key = nla_data(a);
2427 int key_type = nla_type(ovs_key);
2428 struct nlattr *start;
2429 int err;
2431 switch (key_type) {
2432 case OVS_KEY_ATTR_TUNNEL_INFO: {
2433 struct ovs_tunnel_info *ovs_tun = nla_data(ovs_key);
2434 struct ip_tunnel_info *tun_info = &ovs_tun->tun_dst->u.tun_info;
2436 start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
2437 if (!start)
2438 return -EMSGSIZE;
2440 err = ip_tun_to_nlattr(skb, &tun_info->key,
2441 ip_tunnel_info_opts(tun_info),
2442 tun_info->options_len,
2443 ip_tunnel_info_af(tun_info));
2444 if (err)
2445 return err;
2446 nla_nest_end(skb, start);
2447 break;
2449 default:
2450 if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
2451 return -EMSGSIZE;
2452 break;
2455 return 0;
2458 static int masked_set_action_to_set_action_attr(const struct nlattr *a,
2459 struct sk_buff *skb)
2461 const struct nlattr *ovs_key = nla_data(a);
2462 struct nlattr *nla;
2463 size_t key_len = nla_len(ovs_key) / 2;
2465 /* Revert the conversion we did from a non-masked set action to
2466 * masked set action.
2468 nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
2469 if (!nla)
2470 return -EMSGSIZE;
2472 if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
2473 return -EMSGSIZE;
2475 nla_nest_end(skb, nla);
2476 return 0;
2479 int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb)
2481 const struct nlattr *a;
2482 int rem, err;
2484 nla_for_each_attr(a, attr, len, rem) {
2485 int type = nla_type(a);
2487 switch (type) {
2488 case OVS_ACTION_ATTR_SET:
2489 err = set_action_to_attr(a, skb);
2490 if (err)
2491 return err;
2492 break;
2494 case OVS_ACTION_ATTR_SET_TO_MASKED:
2495 err = masked_set_action_to_set_action_attr(a, skb);
2496 if (err)
2497 return err;
2498 break;
2500 case OVS_ACTION_ATTR_SAMPLE:
2501 err = sample_action_to_attr(a, skb);
2502 if (err)
2503 return err;
2504 break;
2506 case OVS_ACTION_ATTR_CT:
2507 err = ovs_ct_action_to_attr(nla_data(a), skb);
2508 if (err)
2509 return err;
2510 break;
2512 default:
2513 if (nla_put(skb, type, nla_len(a), nla_data(a)))
2514 return -EMSGSIZE;
2515 break;
2519 return 0;
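Finally, a hedged sketch of the dump direction (the real caller is in the flow dump path; genlmsg setup and error unwinding are elided): the stored actions are walked and the kernel-internal encodings above are translated back before going on the wire.

/* Illustration only: putting stored actions into a flow dump message. */
static int example_dump_actions(const struct sw_flow_actions *acts,
				struct sk_buff *skb)
{
	struct nlattr *start;
	int err;

	start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
	if (!start)
		return -EMSGSIZE;

	err = ovs_nla_put_actions(acts->actions, acts->actions_len, skb);
	if (err)
		return err;	/* caller would nla_nest_cancel() here */

	nla_nest_end(skb, start);
	return 0;
}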