/*
 * QEMU rocker switch emulation - OF-DPA flow processing support
 *
 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include "qemu/osdep.h"
#include "net/eth.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-rocker.h"
#include "qemu/iov.h"
#include "qemu/timer.h"

#include "rocker.h"
#include "rocker_hw.h"
#include "rocker_fp.h"
#include "rocker_tlv.h"
#include "rocker_world.h"
#include "rocker_desc.h"
#include "rocker_of_dpa.h"
static const MACAddr zero_mac = { .a = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 } };
static const MACAddr ff_mac =   { .a = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
typedef struct of_dpa {
    World *world;
    GHashTable *flow_tbl;
    GHashTable *group_tbl;
    unsigned int flow_tbl_max_size;
    unsigned int group_tbl_max_size;
} OfDpa;
/* flow_key stolen mostly from OVS
 *
 * Note: fields that compare with network packet header fields
 * are stored in network order (BE) to avoid per-packet field
 * byte-swaps.
 */
typedef struct of_dpa_flow_key {
    uint32_t in_pport;               /* ingress port */
    uint32_t tunnel_id;              /* overlay tunnel id */
    uint32_t tbl_id;                 /* table id */
    struct {
        __be16 vlan_id;              /* 0 if no VLAN */
        MACAddr src;                 /* ethernet source address */
        MACAddr dst;                 /* ethernet destination address */
        __be16 type;                 /* ethernet frame type */
    } eth;
    struct {
        uint8_t proto;               /* IP protocol or ARP opcode */
        uint8_t tos;                 /* IP ToS */
        uint8_t ttl;                 /* IP TTL/hop limit */
        uint8_t frag;                /* one of FRAG_TYPE_* */
    } ip;
    union {
        struct {
            struct {
                __be32 src;          /* IP source address */
                __be32 dst;          /* IP destination address */
            } addr;
            union {
                struct {
                    __be16 src;      /* TCP/UDP/SCTP source port */
                    __be16 dst;      /* TCP/UDP/SCTP destination port */
                    __be16 flags;    /* TCP flags */
                } tp;
                struct {
                    MACAddr sha;     /* ARP source hardware address */
                    MACAddr tha;     /* ARP target hardware address */
                } arp;
            };
        } ipv4;
        struct {
            struct {
                Ipv6Addr src;        /* IPv6 source address */
                Ipv6Addr dst;        /* IPv6 destination address */
            } addr;
            __be32 label;            /* IPv6 flow label */
            struct {
                __be16 src;          /* TCP/UDP/SCTP source port */
                __be16 dst;          /* TCP/UDP/SCTP destination port */
                __be16 flags;        /* TCP flags */
            } tp;
            struct {
                Ipv6Addr target;     /* ND target address */
                MACAddr sll;         /* ND source link layer address */
                MACAddr tll;         /* ND target link layer address */
            } nd;
        } ipv6;
    };
    int width;                       /* how many uint64_t's in key? */
} OfDpaFlowKey;
/* Width of key which includes field 'f' in u64s, rounded up */
#define FLOW_KEY_WIDTH(f) \
    DIV_ROUND_UP(offsetof(OfDpaFlowKey, f) + sizeof_field(OfDpaFlowKey, f), \
    sizeof(uint64_t))
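/*
 * Worked example (illustrative values, not from the original source): if
 * offsetof(OfDpaFlowKey, eth.vlan_id) were 12 and the field 2 bytes wide,
 * FLOW_KEY_WIDTH(eth.vlan_id) would be DIV_ROUND_UP(14, 8) == 2, meaning
 * only the first two uint64_t words of the key take part in matching.
 */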
typedef struct of_dpa_flow_action {
    uint32_t goto_tbl;
    struct {
        uint32_t group_id;
        uint32_t tun_log_lport;
        __be16 vlan_id;
    } write;
    struct {
        __be16 new_vlan_id;
        uint8_t copy_to_cpu;
        __be16 vlan_id;
    } apply;
} OfDpaFlowAction;
typedef struct of_dpa_flow {
    uint32_t lpm;
    uint32_t priority;
    uint32_t hardtime;
    uint32_t idletime;
    uint64_t cookie;
    OfDpaFlowKey key;
    OfDpaFlowKey mask;
    OfDpaFlowAction action;
    struct {
        uint64_t hits;
        int64_t install_time;
        int64_t refresh_time;
        uint64_t rx_pkts;
        uint64_t tx_pkts;
    } stats;
} OfDpaFlow;
typedef struct of_dpa_flow_pkt_fields {
    uint32_t tunnel_id;
    struct eth_header *ethhdr;
    __be16 *h_proto;
    struct vlan_header *vlanhdr;
    struct ip_header *ipv4hdr;
    struct ip6_header *ipv6hdr;
    Ipv6Addr *ipv6_src_addr;
    Ipv6Addr *ipv6_dst_addr;
} OfDpaFlowPktFields;
typedef struct of_dpa_flow_context {
    uint32_t in_pport;
    uint32_t tunnel_id;
    struct iovec *iov;
    int iovcnt;
    struct eth_header ethhdr_rewrite;
    struct vlan_header vlanhdr_rewrite;
    struct vlan_header vlanhdr;
    OfDpa *of_dpa;
    OfDpaFlowPktFields fields;
    OfDpaFlowAction action_set;
} OfDpaFlowContext;
typedef struct of_dpa_flow_match {
    OfDpaFlowKey value;
    OfDpaFlow *best;
} OfDpaFlowMatch;
typedef struct of_dpa_group {
    uint32_t id;
    union {
        struct {
            uint32_t out_pport;
            uint8_t pop_vlan;
        } l2_interface;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
        } l2_rewrite;
        struct {
            uint16_t group_count;
            uint32_t *group_ids;
        } l2_flood;
        struct {
            uint32_t group_id;
            MACAddr src_mac;
            MACAddr dst_mac;
            __be16 vlan_id;
            uint8_t ttl_check;
        } l3_unicast;
    };
} OfDpaGroup;
static int of_dpa_mask2prefix(__be32 mask)
{
    int i;
    int count = 32;

    for (i = 0; i < 32; i++) {
        if (!(ntohl(mask) & ((2 << i) - 1))) {
            count--;
        }
    }

    return count;
}
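/*
 * Examples: of_dpa_mask2prefix(htonl(0xffffff00)) returns 24, since the
 * low 8 bits of the host-order mask are clear; an all-ones mask yields 32
 * and an all-zeros mask yields 0.
 */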
#if defined(DEBUG_ROCKER)
static void of_dpa_flow_key_dump(OfDpaFlowKey *key, OfDpaFlowKey *mask)
{
    char buf[512], *b = buf, *mac;

    b += sprintf(b, " tbl %2d", key->tbl_id);

    if (key->in_pport || (mask && mask->in_pport)) {
        b += sprintf(b, " in_pport %2d", key->in_pport);
        if (mask && mask->in_pport != 0xffffffff) {
            b += sprintf(b, "/0x%08x", key->in_pport);
        }
    }

    if (key->tunnel_id || (mask && mask->tunnel_id)) {
        b += sprintf(b, " tun %8d", key->tunnel_id);
        if (mask && mask->tunnel_id != 0xffffffff) {
            b += sprintf(b, "/0x%08x", key->tunnel_id);
        }
    }

    if (key->eth.vlan_id || (mask && mask->eth.vlan_id)) {
        b += sprintf(b, " vlan %4d", ntohs(key->eth.vlan_id));
        if (mask && mask->eth.vlan_id != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(key->eth.vlan_id));
        }
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.src.a);
        b += sprintf(b, " src %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.src.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        (mask && memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN))) {
        mac = qemu_mac_strdup_printf(key->eth.dst.a);
        b += sprintf(b, " dst %s", mac);
        g_free(mac);
        if (mask && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
            mac = qemu_mac_strdup_printf(mask->eth.dst.a);
            b += sprintf(b, "/%s", mac);
            g_free(mac);
        }
    }

    if (key->eth.type || (mask && mask->eth.type)) {
        b += sprintf(b, " type 0x%04x", ntohs(key->eth.type));
        if (mask && mask->eth.type != 0xffff) {
            b += sprintf(b, "/0x%04x", ntohs(mask->eth.type));
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || (mask && mask->ip.proto)) {
                b += sprintf(b, " ip proto %2d", key->ip.proto);
                if (mask && mask->ip.proto != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.proto);
                }
            }
            if (key->ip.tos || (mask && mask->ip.tos)) {
                b += sprintf(b, " ip tos %2d", key->ip.tos);
                if (mask && mask->ip.tos != 0xff) {
                    b += sprintf(b, "/0x%02x", mask->ip.tos);
                }
            }
            break;
        }
        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || (mask && mask->ipv4.addr.dst)) {
                b += sprintf(b, " dst %s",
                             inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst));
                if (mask) {
                    b += sprintf(b, "/%d",
                                 of_dpa_mask2prefix(mask->ipv4.addr.dst));
                }
            }
            break;
        }
    }

    DPRINTF("%s\n", buf);
}
#else
#define of_dpa_flow_key_dump(k, m)
#endif
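/*
 * A dump line looks roughly like (illustrative, not captured output):
 *   " tbl  0 in_pport  1 vlan  10 dst 52:54:00:12:34:56 type 0x0800"
 * Only fields that are nonzero in the key or mask are printed, with a
 * "/mask" suffix whenever the mask is not exact-match.
 */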
static void _of_dpa_flow_match(void *key, void *value, void *user_data)
{
    OfDpaFlow *flow = value;
    OfDpaFlowMatch *match = user_data;
    uint64_t *k = (uint64_t *)&flow->key;
    uint64_t *m = (uint64_t *)&flow->mask;
    uint64_t *v = (uint64_t *)&match->value;
    int i;

    if (flow->key.tbl_id == match->value.tbl_id) {
        of_dpa_flow_key_dump(&flow->key, &flow->mask);
    }

    if (flow->key.width > match->value.width) {
        return;
    }

    for (i = 0; i < flow->key.width; i++, k++, m++, v++) {
        if ((~*k & *m & *v) | (*k & *m & ~*v)) {
            return;
        }
    }

    DPRINTF("match\n");

    if (!match->best ||
        flow->priority > match->best->priority ||
        flow->lpm > match->best->lpm) {
        match->best = flow;
    }
}
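/*
 * The masked compare above, spelled out: (~k & m & v) flags bits the flow
 * requires clear but the lookup value has set, and (k & m & ~v) flags bits
 * required set but clear in the value; any nonzero result means a mismatch
 * under mask m.  A tiny sketch with made-up values:
 *
 *   uint64_t k = 0x0a, m = 0x0f, v = 0x1a;
 *   // v's extra high bit lies outside m, so this still matches:
 *   assert(((~k & m & v) | (k & m & ~v)) == 0);
 */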
static OfDpaFlow *of_dpa_flow_match(OfDpa *of_dpa, OfDpaFlowMatch *match)
{
    DPRINTF("\nnew search\n");
    of_dpa_flow_key_dump(&match->value, NULL);

    g_hash_table_foreach(of_dpa->flow_tbl, _of_dpa_flow_match, match);

    return match->best;
}
static OfDpaFlow *of_dpa_flow_find(OfDpa *of_dpa, uint64_t cookie)
{
    return g_hash_table_lookup(of_dpa->flow_tbl, &cookie);
}
static int of_dpa_flow_add(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_insert(of_dpa->flow_tbl, &flow->cookie, flow);

    return ROCKER_OK;
}
static void of_dpa_flow_del(OfDpa *of_dpa, OfDpaFlow *flow)
{
    g_hash_table_remove(of_dpa->flow_tbl, &flow->cookie);
}
static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie)
{
    OfDpaFlow *flow;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;

    flow = g_new0(OfDpaFlow, 1);

    flow->cookie = cookie;
    flow->mask.tbl_id = 0xffffffff;

    flow->stats.install_time = flow->stats.refresh_time = now;

    return flow;
}
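/*
 * mask.tbl_id is primed to all-ones so tbl_id always participates in the
 * masked compare done by _of_dpa_flow_match(); a new flow can therefore
 * never match a lookup aimed at a different table.
 */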
static void of_dpa_flow_pkt_hdr_reset(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    fc->iov[0].iov_base = fields->ethhdr;
    fc->iov[0].iov_len = sizeof(struct eth_header);
    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = fields->vlanhdr ? sizeof(struct vlan_header) : 0;
}
static void of_dpa_flow_pkt_parse(OfDpaFlowContext *fc,
                                  const struct iovec *iov, int iovcnt)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    size_t sofar = 0;
    int i;

    sofar += sizeof(struct eth_header);
    if (iov->iov_len < sofar) {
        DPRINTF("flow_pkt_parse underrun on eth_header\n");
        return;
    }

    fields->ethhdr = iov->iov_base;
    fields->h_proto = &fields->ethhdr->h_proto;

    if (ntohs(*fields->h_proto) == ETH_P_VLAN) {
        sofar += sizeof(struct vlan_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on vlan_header\n");
            return;
        }
        fields->vlanhdr = (struct vlan_header *)(fields->ethhdr + 1);
        fields->h_proto = &fields->vlanhdr->h_proto;
    }

    switch (ntohs(*fields->h_proto)) {
    case ETH_P_IP:
        sofar += sizeof(struct ip_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip_header\n");
            return;
        }
        fields->ipv4hdr = (struct ip_header *)(fields->h_proto + 1);
        break;
    case ETH_P_IPV6:
        sofar += sizeof(struct ip6_header);
        if (iov->iov_len < sofar) {
            DPRINTF("flow_pkt_parse underrun on ip6_header\n");
            return;
        }
        fields->ipv6hdr = (struct ip6_header *)(fields->h_proto + 1);
        break;
    }

    /* To facilitate (potential) VLAN tag insertion, make a
     * copy of the iov and insert two new vectors at the
     * beginning for eth hdr and vlan hdr.  No data is copied,
     * just the vectors.
     */

    of_dpa_flow_pkt_hdr_reset(fc);

    fc->iov[2].iov_base = fields->h_proto + 1;
    fc->iov[2].iov_len = iov->iov_len - fc->iov[0].iov_len - fc->iov[1].iov_len;

    for (i = 1; i < iovcnt; i++) {
        fc->iov[i + 2] = iov[i];
    }

    fc->iovcnt = iovcnt + 2;
}
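/*
 * Resulting iovec layout (illustrative): iov[0] covers the Ethernet
 * header, iov[1] the VLAN header (length 0 when untagged), iov[2] the
 * remainder of the caller's first vector, and iov[3..] mirror the rest.
 * Keeping the headers in their own vectors lets the insert/strip/rewrite
 * helpers below retarget pointers instead of copying payload.
 */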
static void of_dpa_flow_pkt_insert_vlan(OfDpaFlowContext *fc, __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;
    uint16_t h_proto = fields->ethhdr->h_proto;

    if (fields->vlanhdr) {
        DPRINTF("flow_pkt_insert_vlan packet already has vlan\n");
        return;
    }

    fields->ethhdr->h_proto = htons(ETH_P_VLAN);
    fields->vlanhdr = &fc->vlanhdr;
    fields->vlanhdr->h_tci = vlan_id;
    fields->vlanhdr->h_proto = h_proto;
    fields->h_proto = &fields->vlanhdr->h_proto;

    fc->iov[1].iov_base = fields->vlanhdr;
    fc->iov[1].iov_len = sizeof(struct vlan_header);
}
static void of_dpa_flow_pkt_strip_vlan(OfDpaFlowContext *fc)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (!fields->vlanhdr) {
        return;
    }

    fc->iov[0].iov_len -= sizeof(fields->ethhdr->h_proto);
    fc->iov[1].iov_base = fields->h_proto;
    fc->iov[1].iov_len = sizeof(fields->ethhdr->h_proto);
}
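/*
 * How the strip works without copying: iov[0] is shortened by two bytes so
 * it ends just before the outer ethertype, and iov[1] is retargeted at the
 * inner h_proto (two bytes).  The four tag bytes in between are simply
 * skipped when the iovec chain is transmitted.
 */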
static void of_dpa_flow_pkt_hdr_rewrite(OfDpaFlowContext *fc,
                                        uint8_t *src_mac, uint8_t *dst_mac,
                                        __be16 vlan_id)
{
    OfDpaFlowPktFields *fields = &fc->fields;

    if (src_mac || dst_mac) {
        memcpy(&fc->ethhdr_rewrite, fields->ethhdr, sizeof(struct eth_header));
        if (src_mac && memcmp(src_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_source, src_mac, ETH_ALEN);
        }
        if (dst_mac && memcmp(dst_mac, zero_mac.a, ETH_ALEN)) {
            memcpy(fc->ethhdr_rewrite.h_dest, dst_mac, ETH_ALEN);
        }
        fc->iov[0].iov_base = &fc->ethhdr_rewrite;
    }

    if (vlan_id && fields->vlanhdr) {
        fc->vlanhdr_rewrite = fc->vlanhdr;
        fc->vlanhdr_rewrite.h_tci = vlan_id;
        fc->iov[1].iov_base = &fc->vlanhdr_rewrite;
    }
}
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id);
static void of_dpa_ig_port_build_match(OfDpaFlowContext *fc,
                                       OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    match->value.in_pport = fc->in_pport;
    match->value.width = FLOW_KEY_WIDTH(tbl_id);
}
static void of_dpa_ig_port_miss(OfDpaFlowContext *fc)
{
    uint32_t port;

    /* The default on miss is for packets from physical ports
     * to go to the VLAN Flow Table.  There is no default rule
     * for packets from logical ports, which are dropped on miss.
     */

    if (fp_port_from_pport(fc->in_pport, &port)) {
        of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_VLAN);
    }
}
static void of_dpa_vlan_build_match(OfDpaFlowContext *fc,
                                    OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    match->value.in_pport = fc->in_pport;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    }
    match->value.width = FLOW_KEY_WIDTH(eth.vlan_id);
}
static void of_dpa_vlan_insert(OfDpaFlowContext *fc,
                               OfDpaFlow *flow)
{
    if (flow->action.apply.new_vlan_id) {
        of_dpa_flow_pkt_insert_vlan(fc, flow->action.apply.new_vlan_id);
    }
}
static void of_dpa_term_mac_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    match->value.in_pport = fc->in_pport;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.type);
}
static void of_dpa_term_mac_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_BRIDGING);
}
static void of_dpa_apply_actions(OfDpaFlowContext *fc,
                                 OfDpaFlow *flow)
{
    fc->action_set.apply.copy_to_cpu = flow->action.apply.copy_to_cpu;
    fc->action_set.apply.vlan_id = flow->key.eth.vlan_id;
}
static void of_dpa_bridging_build_match(OfDpaFlowContext *fc,
                                        OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    if (fc->fields.vlanhdr) {
        match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    } else if (fc->tunnel_id) {
        match->value.tunnel_id = fc->tunnel_id;
    }
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.width = FLOW_KEY_WIDTH(eth.dst);
}
static void of_dpa_bridging_learn(OfDpaFlowContext *fc,
                                  OfDpaFlow *dst_flow)
{
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;
    uint8_t *addr;
    uint16_t vlan_id;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int64_t refresh_delay = 1;

    /* Do a lookup in bridge table by src_mac/vlan */

    addr = fc->fields.ethhdr->h_source;
    vlan_id = fc->fields.vlanhdr->h_tci;

    match.value.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
    match.value.eth.vlan_id = vlan_id;
    memcpy(match.value.eth.dst.a, addr, sizeof(match.value.eth.dst.a));
    match.value.width = FLOW_KEY_WIDTH(eth.dst);

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (flow) {
        if (!memcmp(flow->mask.eth.dst.a, ff_mac.a,
                    sizeof(flow->mask.eth.dst.a))) {
            /* src_mac/vlan already learned; if in_port and out_port
             * don't match, the end station has moved and the port
             * needs updating */
            /* XXX implement the in_port/out_port check */
            if (now - flow->stats.refresh_time < refresh_delay) {
                return;
            }
            flow->stats.refresh_time = now;
        }
    }

    /* Let driver know about mac/vlan.  This may be a new mac/vlan
     * or a refresh of existing mac/vlan that's been hit after the
     * refresh_delay.
     */

    rocker_event_mac_vlan_seen(world_rocker(fc->of_dpa->world),
                               fc->in_pport, addr, vlan_id);
}
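/*
 * Rate-limit example (illustrative): with refresh_delay at one second, a
 * burst of frames from the same src_mac/vlan produces at most one
 * rocker_event_mac_vlan_seen() notification per second once an exact-match
 * (ff:ff:ff:ff:ff:ff dst mask) entry exists for that address.
 */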
static void of_dpa_bridging_miss(OfDpaFlowContext *fc)
{
    of_dpa_bridging_learn(fc, NULL);
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
static void of_dpa_bridging_action_write(OfDpaFlowContext *fc,
                                         OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.tun_log_lport = flow->action.write.tun_log_lport;
}
static void of_dpa_unicast_routing_build_match(OfDpaFlowContext *fc,
                                               OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
static void of_dpa_unicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
static void of_dpa_unicast_routing_action_write(OfDpaFlowContext *fc,
                                                OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}
static void
of_dpa_multicast_routing_build_match(OfDpaFlowContext *fc,
                                     OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    if (fc->fields.ipv4hdr) {
        match->value.ipv4.addr.src = fc->fields.ipv4hdr->ip_src;
        match->value.ipv4.addr.dst = fc->fields.ipv4hdr->ip_dst;
    }
    if (fc->fields.ipv6_src_addr) {
        memcpy(&match->value.ipv6.addr.src, fc->fields.ipv6_src_addr,
               sizeof(match->value.ipv6.addr.src));
    }
    if (fc->fields.ipv6_dst_addr) {
        memcpy(&match->value.ipv6.addr.dst, fc->fields.ipv6_dst_addr,
               sizeof(match->value.ipv6.addr.dst));
    }
    match->value.width = FLOW_KEY_WIDTH(ipv6.addr.dst);
}
static void of_dpa_multicast_routing_miss(OfDpaFlowContext *fc)
{
    of_dpa_flow_ig_tbl(fc, ROCKER_OF_DPA_TABLE_ID_ACL_POLICY);
}
static void
of_dpa_multicast_routing_action_write(OfDpaFlowContext *fc,
                                      OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
    fc->action_set.write.vlan_id = flow->action.write.vlan_id;
}
static void of_dpa_acl_build_match(OfDpaFlowContext *fc,
                                   OfDpaFlowMatch *match)
{
    match->value.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    match->value.in_pport = fc->in_pport;
    memcpy(match->value.eth.src.a, fc->fields.ethhdr->h_source,
           sizeof(match->value.eth.src.a));
    memcpy(match->value.eth.dst.a, fc->fields.ethhdr->h_dest,
           sizeof(match->value.eth.dst.a));
    match->value.eth.type = *fc->fields.h_proto;
    match->value.eth.vlan_id = fc->fields.vlanhdr->h_tci;
    match->value.width = FLOW_KEY_WIDTH(eth.type);
    if (fc->fields.ipv4hdr) {
        match->value.ip.proto = fc->fields.ipv4hdr->ip_p;
        match->value.ip.tos = fc->fields.ipv4hdr->ip_tos;
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    } else if (fc->fields.ipv6hdr) {
        match->value.ip.proto =
            fc->fields.ipv6hdr->ip6_ctlun.ip6_un1.ip6_un1_nxt;
        match->value.ip.tos = 0; /* XXX what goes here? */
        match->value.width = FLOW_KEY_WIDTH(ip.tos);
    }
}
static void of_dpa_eg(OfDpaFlowContext *fc);

static void of_dpa_acl_hit(OfDpaFlowContext *fc,
                           OfDpaFlow *dst_flow)
{
    of_dpa_eg(fc);
}
static void of_dpa_acl_action_write(OfDpaFlowContext *fc,
                                    OfDpaFlow *flow)
{
    if (flow->action.write.group_id != ROCKER_GROUP_NONE) {
        fc->action_set.write.group_id = flow->action.write.group_id;
    }
}
static void of_dpa_drop(OfDpaFlowContext *fc)
{
    /* drop packet */
}
static OfDpaGroup *of_dpa_group_find(OfDpa *of_dpa,
                                     uint32_t group_id)
{
    return g_hash_table_lookup(of_dpa->group_tbl, &group_id);
}
static int of_dpa_group_add(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_insert(of_dpa->group_tbl, &group->id, group);

    return ROCKER_OK;
}
static int of_dpa_group_mod(OfDpa *of_dpa, OfDpaGroup *group)
{
    OfDpaGroup *old_group = of_dpa_group_find(of_dpa, group->id);

    if (!old_group) {
        return -ROCKER_ENOENT;
    }

    /* XXX */

    return ROCKER_OK;
}
static int of_dpa_group_del(OfDpa *of_dpa, OfDpaGroup *group)
{
    g_hash_table_remove(of_dpa->group_tbl, &group->id);

    return ROCKER_OK;
}
static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    /* XXX get/return stats */

    return ROCKER_OK;
}
static OfDpaGroup *of_dpa_group_alloc(uint32_t id)
{
    OfDpaGroup *group = g_new0(OfDpaGroup, 1);

    group->id = id;

    return group;
}
static void of_dpa_output_l2_interface(OfDpaFlowContext *fc,
                                       OfDpaGroup *group)
{
    uint8_t copy_to_cpu = fc->action_set.apply.copy_to_cpu;

    if (group->l2_interface.pop_vlan) {
        of_dpa_flow_pkt_strip_vlan(fc);
    }

    /* Note: By default, and as per the OpenFlow 1.3.1
     * specification, a packet cannot be forwarded back
     * to the IN_PORT from which it came in.  An action
     * bucket that specifies the particular packet's
     * egress port is not evaluated.
     */

    if (group->l2_interface.out_pport == 0) {
        rx_produce(fc->of_dpa->world, fc->in_pport, fc->iov, fc->iovcnt,
                   copy_to_cpu);
    } else if (group->l2_interface.out_pport != fc->in_pport) {
        rocker_port_eg(world_rocker(fc->of_dpa->world),
                       group->l2_interface.out_pport,
                       fc->iov, fc->iovcnt);
    }
}
static void of_dpa_output_l2_rewrite(OfDpaFlowContext *fc,
                                     OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l2_rewrite.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l2_rewrite.src_mac.a,
                                group->l2_rewrite.dst_mac.a,
                                group->l2_rewrite.vlan_id);
    of_dpa_output_l2_interface(fc, l2_group);
}
static void of_dpa_output_l2_flood(OfDpaFlowContext *fc,
                                   OfDpaGroup *group)
{
    OfDpaGroup *l2_group;
    int i;

    for (i = 0; i < group->l2_flood.group_count; i++) {
        of_dpa_flow_pkt_hdr_reset(fc);
        l2_group = of_dpa_group_find(fc->of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        switch (ROCKER_GROUP_TYPE_GET(l2_group->id)) {
        case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
            of_dpa_output_l2_interface(fc, l2_group);
            break;
        case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
            of_dpa_output_l2_rewrite(fc, l2_group);
            break;
        }
    }
}
static void of_dpa_output_l3_unicast(OfDpaFlowContext *fc, OfDpaGroup *group)
{
    OfDpaGroup *l2_group =
        of_dpa_group_find(fc->of_dpa, group->l3_unicast.group_id);

    if (!l2_group) {
        return;
    }

    of_dpa_flow_pkt_hdr_rewrite(fc, group->l3_unicast.src_mac.a,
                                group->l3_unicast.dst_mac.a,
                                group->l3_unicast.vlan_id);
    /* XXX need ttl_check */
    of_dpa_output_l2_interface(fc, l2_group);
}
static void of_dpa_eg(OfDpaFlowContext *fc)
{
    OfDpaFlowAction *set = &fc->action_set;
    OfDpaGroup *group;
    uint32_t group_id;

    /* send a copy of pkt to CPU (controller)? */

    if (set->apply.copy_to_cpu) {
        group_id = ROCKER_GROUP_L2_INTERFACE(set->apply.vlan_id, 0);
        group = of_dpa_group_find(fc->of_dpa, group_id);
        if (group) {
            of_dpa_output_l2_interface(fc, group);
            of_dpa_flow_pkt_hdr_reset(fc);
        }
    }

    /* process group write actions */

    if (!set->write.group_id) {
        return;
    }

    group = of_dpa_group_find(fc->of_dpa, set->write.group_id);
    if (!group) {
        return;
    }

    switch (ROCKER_GROUP_TYPE_GET(group->id)) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        of_dpa_output_l2_interface(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        of_dpa_output_l2_rewrite(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        of_dpa_output_l2_flood(fc, group);
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        of_dpa_output_l3_unicast(fc, group);
        break;
    }
}
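/*
 * Note the ordering above: the copy-to-CPU path runs first, against the L2
 * interface group for pport 0 (the CPU port), and the packet headers are
 * then reset so the group write actions that follow see the original,
 * unmodified headers.
 */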
typedef struct of_dpa_flow_tbl_ops {
    void (*build_match)(OfDpaFlowContext *fc, OfDpaFlowMatch *match);
    void (*hit)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*miss)(OfDpaFlowContext *fc);
    void (*hit_no_goto)(OfDpaFlowContext *fc);
    void (*action_apply)(OfDpaFlowContext *fc, OfDpaFlow *flow);
    void (*action_write)(OfDpaFlowContext *fc, OfDpaFlow *flow);
} OfDpaFlowTblOps;
static OfDpaFlowTblOps of_dpa_tbl_ops[] = {
    [ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT] = {
        .build_match = of_dpa_ig_port_build_match,
        .miss = of_dpa_ig_port_miss,
        .hit_no_goto = of_dpa_drop,
    },
    [ROCKER_OF_DPA_TABLE_ID_VLAN] = {
        .build_match = of_dpa_vlan_build_match,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_vlan_insert,
    },
    [ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC] = {
        .build_match = of_dpa_term_mac_build_match,
        .miss = of_dpa_term_mac_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
    },
    [ROCKER_OF_DPA_TABLE_ID_BRIDGING] = {
        .build_match = of_dpa_bridging_build_match,
        .hit = of_dpa_bridging_learn,
        .miss = of_dpa_bridging_miss,
        .hit_no_goto = of_dpa_drop,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_bridging_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING] = {
        .build_match = of_dpa_unicast_routing_build_match,
        .miss = of_dpa_unicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_unicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING] = {
        .build_match = of_dpa_multicast_routing_build_match,
        .miss = of_dpa_multicast_routing_miss,
        .hit_no_goto = of_dpa_drop,
        .action_write = of_dpa_multicast_routing_action_write,
    },
    [ROCKER_OF_DPA_TABLE_ID_ACL_POLICY] = {
        .build_match = of_dpa_acl_build_match,
        .hit = of_dpa_acl_hit,
        .miss = of_dpa_eg,
        .action_apply = of_dpa_apply_actions,
        .action_write = of_dpa_acl_action_write,
    },
};
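/*
 * The array above encodes the OF-DPA pipeline: Ingress Port -> VLAN ->
 * Termination MAC -> Bridging / Unicast Routing / Multicast Routing ->
 * ACL Policy.  Each entry supplies only the hooks its table needs;
 * of_dpa_flow_ig_tbl() below skips any hook left NULL.
 */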
static void of_dpa_flow_ig_tbl(OfDpaFlowContext *fc, uint32_t tbl_id)
{
    OfDpaFlowTblOps *ops = &of_dpa_tbl_ops[tbl_id];
    OfDpaFlowMatch match = { { 0, }, };
    OfDpaFlow *flow;

    if (ops->build_match) {
        ops->build_match(fc, &match);
    } else {
        return;
    }

    flow = of_dpa_flow_match(fc->of_dpa, &match);
    if (!flow) {
        if (ops->miss) {
            ops->miss(fc);
        }
        return;
    }

    flow->stats.hits++;

    if (ops->action_apply) {
        ops->action_apply(fc, flow);
    }

    if (ops->action_write) {
        ops->action_write(fc, flow);
    }

    if (ops->hit) {
        ops->hit(fc, flow);
    }

    if (flow->action.goto_tbl) {
        of_dpa_flow_ig_tbl(fc, flow->action.goto_tbl);
    } else if (ops->hit_no_goto) {
        ops->hit_no_goto(fc);
    }
}
static ssize_t of_dpa_ig(World *world, uint32_t pport,
                         const struct iovec *iov, int iovcnt)
{
    struct iovec iov_copy[iovcnt + 2];
    OfDpaFlowContext fc = {
        .of_dpa = world_private(world),
        .in_pport = pport,
        .iov = iov_copy,
        .iovcnt = iovcnt + 2,
    };

    of_dpa_flow_pkt_parse(&fc, iov, iovcnt);
    of_dpa_flow_ig_tbl(&fc, ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT);

    return iov_size(iov, iovcnt);
}
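/*
 * Usage sketch (hypothetical): for a packet arriving on front-panel port 1,
 * rocker calls of_dpa_ig(world, 1, iov, iovcnt); the two spare iov_copy
 * slots reserved up front are what allow a VLAN tag to be inserted later
 * without reallocating or shifting packet data.
 */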
#define ROCKER_TUNNEL_LPORT 0x00010000

static int of_dpa_cmd_add_ig_port(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool overlay_tunnel;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
    key->width = FLOW_KEY_WIDTH(tbl_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    overlay_tunnel = !!(key->in_pport & ROCKER_TUNNEL_LPORT);

    action->goto_tbl =
        rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

    if (!overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_VLAN) {
        return -ROCKER_EINVAL;
    }

    if (overlay_tunnel && action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_BRIDGING) {
        return -ROCKER_EINVAL;
    }

    return ROCKER_OK;
}
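/*
 * Constraint recap: an ingress-port rule on a physical port must goto the
 * VLAN table, while a rule whose in_pport has the ROCKER_TUNNEL_LPORT bit
 * set (an overlay logical port) must goto the Bridging table, matching the
 * OF-DPA pipeline split for tunneled traffic.
 */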
static int of_dpa_cmd_add_vlan(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    uint32_t port;
    bool untagged;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        DPRINTF("Must give in_pport and vlan_id to install VLAN tbl entry\n");
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
    key->width = FLOW_KEY_WIDTH(eth.vlan_id);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        DPRINTF("in_pport (%d) not a front-panel port\n", key->in_pport);
        return -ROCKER_EINVAL;
    }
    mask->in_pport = 0xffffffff;

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    if (key->eth.vlan_id) {
        untagged = false; /* filtering */
    } else {
        untagged = true;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            DPRINTF("Goto tbl (%d) must be TERM_MAC\n", action->goto_tbl);
            return -ROCKER_EINVAL;
        }
    }

    if (untagged) {
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]) {
            DPRINTF("Must specify new vlan_id if untagged\n");
            return -ROCKER_EINVAL;
        }
        action->apply.new_vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_NEW_VLAN_ID]);
        if (1 > ntohs(action->apply.new_vlan_id) ||
            ntohs(action->apply.new_vlan_id) > 4095) {
            DPRINTF("New vlan_id (%d) must be between 1 and 4095\n",
                    ntohs(action->apply.new_vlan_id));
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
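/*
 * Usage sketch (hypothetical TLV values): a VLAN-table entry with
 * in_pport=1 and vlan_id=0 is an "untagged" rule and must also carry
 * ROCKER_TLV_OF_DPA_NEW_VLAN_ID, e.g. htons(100), so untagged ingress
 * frames are classified into VLAN 100 before the Termination MAC table.
 */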
static int of_dpa_cmd_add_term_mac(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    const MACAddr ipv4_mcast = { .a = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 } };
    const MACAddr ipv4_mask =  { .a = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 } };
    const MACAddr ipv6_mcast = { .a = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 } };
    const MACAddr ipv6_mask =  { .a = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 } };
    uint32_t port;
    bool unicast = false;
    bool multicast = false;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (!fp_port_from_pport(key->in_pport, &port)) {
        return -ROCKER_EINVAL;
    }
    mask->in_pport =
        rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type != htons(0x0800) && key->eth.type != htons(0x86dd)) {
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    memcpy(key->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
           sizeof(key->eth.dst.a));
    memcpy(mask->eth.dst.a,
           rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
           sizeof(mask->eth.dst.a));

    if ((key->eth.dst.a[0] & 0x01) == 0x00) {
        unicast = true;
    }

    /* only two wildcard rules are acceptable for IPv4 and IPv6 multicast */
    if (memcmp(key->eth.dst.a, ipv4_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv4_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }
    if (memcmp(key->eth.dst.a, ipv6_mcast.a, sizeof(key->eth.dst.a)) == 0 &&
        memcmp(mask->eth.dst.a, ipv6_mask.a, sizeof(mask->eth.dst.a)) == 0) {
        multicast = true;
    }

    if (!unicast && !multicast) {
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    mask->eth.vlan_id =
        rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);

        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (unicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }

        if (multicast &&
            action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_bridging(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    bool unicast = false;
    bool dst_mac = false;
    bool dst_mac_mask = false;
    enum {
        BRIDGING_MODE_UNKNOWN,
        BRIDGING_MODE_VLAN_UCAST,
        BRIDGING_MODE_VLAN_MCAST,
        BRIDGING_MODE_VLAN_DFLT,
        BRIDGING_MODE_TUNNEL_UCAST,
        BRIDGING_MODE_TUNNEL_MCAST,
        BRIDGING_MODE_TUNNEL_DFLT,
    } mode = BRIDGING_MODE_UNKNOWN;

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        mask->eth.vlan_id = 0xffff;
        key->width = FLOW_KEY_WIDTH(eth.vlan_id);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        key->tunnel_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]);
        mask->tunnel_id = 0xffffffff;
        key->width = FLOW_KEY_WIDTH(tunnel_id);
    }

    /* can't do VLAN bridging and tunnel bridging at same time */
    if (key->eth.vlan_id && key->tunnel_id) {
        DPRINTF("can't do VLAN bridging and tunnel bridging at same time\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac = true;
        unicast = (key->eth.dst.a[0] & 0x01) == 0x00;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
        key->width = FLOW_KEY_WIDTH(eth.dst);
        dst_mac_mask = true;
    } else if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(mask->eth.dst.a, ff_mac.a, sizeof(mask->eth.dst.a));
    }

    if (key->eth.vlan_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_VLAN_UCAST :
                             BRIDGING_MODE_VLAN_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_VLAN_DFLT;
        }
    } else if (key->tunnel_id) {
        if (dst_mac && !dst_mac_mask) {
            mode = unicast ? BRIDGING_MODE_TUNNEL_UCAST :
                             BRIDGING_MODE_TUNNEL_MCAST;
        } else if ((dst_mac && dst_mac_mask) || !dst_mac) {
            mode = BRIDGING_MODE_TUNNEL_DFLT;
        }
    }

    if (mode == BRIDGING_MODE_UNKNOWN) {
        DPRINTF("Unknown bridging mode\n");
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            DPRINTF("Bridging goto tbl must be ACL policy\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        switch (mode) {
        case BRIDGING_MODE_VLAN_UCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
                DPRINTF("Bridging mode vlan ucast needs L2 "
                        "interface group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) {
                DPRINTF("Bridging mode vlan mcast needs L2 "
                        "mcast group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_VLAN_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) {
                DPRINTF("Bridging mode vlan dflt needs L2 "
                        "flood group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_MCAST:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel mcast needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        case BRIDGING_MODE_TUNNEL_DFLT:
            if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
                ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY) {
                DPRINTF("Bridging mode tunnel dflt needs L2 "
                        "overlay group (0x%08x)\n",
                        action->write.group_id);
                return -ROCKER_EINVAL;
            }
            break;
        default:
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]) {
        action->write.tun_log_lport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_LPORT]);
        if (mode != BRIDGING_MODE_TUNNEL_UCAST) {
            DPRINTF("Have tunnel logical port but not "
                    "in bridging tunnel mode\n");
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_unicast_routing(OfDpaFlow *flow,
                                          RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        UNICAST_ROUTING_MODE_UNKNOWN,
        UNICAST_ROUTING_MODE_IPV4,
        UNICAST_ROUTING_MODE_IPV6,
    } mode = UNICAST_ROUTING_MODE_UNKNOWN;
    uint8_t type;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = UNICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = UNICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }
    mask->eth.type = htons(0xffff);

    switch (mode) {
    case UNICAST_ROUTING_MODE_IPV4:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }
        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        flow->lpm = of_dpa_mask2prefix(htonl(0xffffffff));
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]) {
            mask->ipv4.addr.dst =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP_MASK]);
            flow->lpm = of_dpa_mask2prefix(mask->ipv4.addr.dst);
        }
        break;
    case UNICAST_ROUTING_MODE_IPV6:
        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }
        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }
        if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.dst,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.dst));
        }
        break;
    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        type = ROCKER_GROUP_TYPE_GET(action->write.group_id);
        if (type != ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST &&
            type != ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP) {
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_multicast_routing(OfDpaFlow *flow,
                                            RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        MULTICAST_ROUTING_MODE_UNKNOWN,
        MULTICAST_ROUTING_MODE_IPV4,
        MULTICAST_ROUTING_MODE_IPV6,
    } mode = MULTICAST_ROUTING_MODE_UNKNOWN;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
    key->width = FLOW_KEY_WIDTH(ipv6.addr.dst);

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    switch (ntohs(key->eth.type)) {
    case 0x0800:
        mode = MULTICAST_ROUTING_MODE_IPV4;
        break;
    case 0x86dd:
        mode = MULTICAST_ROUTING_MODE_IPV6;
        break;
    default:
        return -ROCKER_EINVAL;
    }

    key->eth.vlan_id = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);

    switch (mode) {
    case MULTICAST_ROUTING_MODE_IPV4:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            key->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]);
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]) {
            mask->ipv4.addr.src =
                rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP_MASK]);
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IP]) {
            if (mask->ipv4.addr.src != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]) {
            return -ROCKER_EINVAL;
        }

        key->ipv4.addr.dst =
            rocker_tlv_get_u32(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IP]);
        if (!ipv4_addr_is_multicast(key->ipv4.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    case MULTICAST_ROUTING_MODE_IPV6:

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            memcpy(&key->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]),
                   sizeof(key->ipv6.addr.src));
        }

        if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]) {
            memcpy(&mask->ipv6.addr.src,
                   rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6_MASK]),
                   sizeof(mask->ipv6.addr.src));
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_SRC_IPV6]) {
            if (mask->ipv6.addr.src.addr32[0] != 0 &&
                mask->ipv6.addr.src.addr32[1] != 0 &&
                mask->ipv6.addr.src.addr32[2] != 0 &&
                mask->ipv6.addr.src.addr32[3] != 0) {
                return -ROCKER_EINVAL;
            }
        }

        if (!flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]) {
            return -ROCKER_EINVAL;
        }

        memcpy(&key->ipv6.addr.dst,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_IPV6]),
               sizeof(key->ipv6.addr.dst));
        if (!ipv6_addr_is_multicast(&key->ipv6.addr.dst)) {
            return -ROCKER_EINVAL;
        }

        break;

    default:
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]) {
        action->goto_tbl =
            rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_GOTO_TABLE_ID]);
        if (action->goto_tbl != ROCKER_OF_DPA_TABLE_ID_ACL_POLICY) {
            return -ROCKER_EINVAL;
        }
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
        if (ROCKER_GROUP_TYPE_GET(action->write.group_id) !=
            ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST) {
            return -ROCKER_EINVAL;
        }
        action->write.vlan_id = key->eth.vlan_id;
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_acl_ip(OfDpaFlowKey *key, OfDpaFlowKey *mask,
                                 RockerTlv **flow_tlvs)
{
    key->width = FLOW_KEY_WIDTH(ip.tos);

    key->ip.proto = 0;
    key->ip.tos = 0;
    mask->ip.proto = 0;
    mask->ip.tos = 0;

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]) {
        key->ip.proto =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]) {
        mask->ip.proto =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_PROTO_MASK]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]) {
        key->ip.tos =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]) {
        mask->ip.tos =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_DSCP_MASK]);
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) {
        key->ip.tos |=
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN]) << 6;
    }
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) {
        mask->ip.tos |=
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_IP_ECN_MASK]) << 6;
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_acl(OfDpaFlow *flow, RockerTlv **flow_tlvs)
{
    OfDpaFlowKey *key = &flow->key;
    OfDpaFlowKey *mask = &flow->mask;
    OfDpaFlowAction *action = &flow->action;
    enum {
        ACL_MODE_UNKNOWN,
        ACL_MODE_IPV4_VLAN,
        ACL_MODE_IPV6_VLAN,
        ACL_MODE_IPV4_TENANT,
        ACL_MODE_IPV6_TENANT,
        ACL_MODE_NON_IP_VLAN,
        ACL_MODE_NON_IP_TENANT,
        ACL_MODE_ANY_VLAN,
        ACL_MODE_ANY_TENANT,
    } mode = ACL_MODE_UNKNOWN;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]) {
        return -ROCKER_EINVAL;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID] &&
        flow_tlvs[ROCKER_TLV_OF_DPA_TUNNEL_ID]) {
        return -ROCKER_EINVAL;
    }

    key->tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
    key->width = FLOW_KEY_WIDTH(eth.type);

    key->in_pport = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT]);
    if (flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]) {
        mask->in_pport =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IN_PPORT_MASK]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(key->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(key->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]) {
        memcpy(mask->eth.src.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC_MASK]),
               sizeof(mask->eth.src.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(key->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(key->eth.dst.a));
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]) {
        memcpy(mask->eth.dst.a,
               rocker_tlv_data(flow_tlvs[ROCKER_TLV_OF_DPA_DST_MAC_MASK]),
               sizeof(mask->eth.dst.a));
    }

    key->eth.type = rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_ETHERTYPE]);
    if (key->eth.type) {
        mask->eth.type = 0xffff;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        key->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]) {
        mask->eth.vlan_id =
            rocker_tlv_get_u16(flow_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID_MASK]);
    }

    switch (ntohs(key->eth.type)) {
    case 0x0000:
        mode = (key->eth.vlan_id) ? ACL_MODE_ANY_VLAN : ACL_MODE_ANY_TENANT;
        break;
    case 0x0800:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV4_VLAN : ACL_MODE_IPV4_TENANT;
        break;
    case 0x86dd:
        mode = (key->eth.vlan_id) ? ACL_MODE_IPV6_VLAN : ACL_MODE_IPV6_TENANT;
        break;
    default:
        mode = (key->eth.vlan_id) ? ACL_MODE_NON_IP_VLAN :
                                    ACL_MODE_NON_IP_TENANT;
        break;
    }

    /* XXX only supporting VLAN modes for now */
    if (mode != ACL_MODE_IPV4_VLAN &&
        mode != ACL_MODE_IPV6_VLAN &&
        mode != ACL_MODE_NON_IP_VLAN &&
        mode != ACL_MODE_ANY_VLAN) {
        return -ROCKER_EINVAL;
    }

    switch (ntohs(key->eth.type)) {
    case 0x0800:
    case 0x86dd:
        err = of_dpa_cmd_add_acl_ip(key, mask, flow_tlvs);
        break;
    }

    if (err) {
        return err;
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        action->write.group_id =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);
    }

    if (flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]) {
        action->apply.copy_to_cpu =
            rocker_tlv_get_u8(flow_tlvs[ROCKER_TLV_OF_DPA_COPY_CPU_ACTION]);
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_flow_add_mod(OfDpa *of_dpa, OfDpaFlow *flow,
                                   RockerTlv **flow_tlvs)
{
    enum rocker_of_dpa_table_id tbl;
    int err = ROCKER_OK;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY] ||
        !flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]) {
        return -ROCKER_EINVAL;
    }

    tbl = rocker_tlv_get_le16(flow_tlvs[ROCKER_TLV_OF_DPA_TABLE_ID]);
    flow->priority = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_PRIORITY]);
    flow->hardtime = rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_HARDTIME]);

    if (flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]) {
        if (tbl == ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT ||
            tbl == ROCKER_OF_DPA_TABLE_ID_VLAN ||
            tbl == ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC) {
            return -ROCKER_EINVAL;
        }
        flow->idletime =
            rocker_tlv_get_le32(flow_tlvs[ROCKER_TLV_OF_DPA_IDLETIME]);
    }

    switch (tbl) {
    case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
        err = of_dpa_cmd_add_ig_port(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_VLAN:
        err = of_dpa_cmd_add_vlan(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
        err = of_dpa_cmd_add_term_mac(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
        err = of_dpa_cmd_add_bridging(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
        err = of_dpa_cmd_add_unicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING:
        err = of_dpa_cmd_add_multicast_routing(flow, flow_tlvs);
        break;
    case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
        err = of_dpa_cmd_add_acl(flow, flow_tlvs);
        break;
    }

    return err;
}
static int of_dpa_cmd_flow_add(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    int err = ROCKER_OK;

    if (flow) {
        return -ROCKER_EEXIST;
    }

    flow = of_dpa_flow_alloc(cookie);

    err = of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
    if (err) {
        g_free(flow);
        return err;
    }

    return of_dpa_flow_add(of_dpa, flow);
}
static int of_dpa_cmd_flow_mod(OfDpa *of_dpa, uint64_t cookie,
                               RockerTlv **flow_tlvs)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_cmd_flow_add_mod(of_dpa, flow, flow_tlvs);
}
static int of_dpa_cmd_flow_del(OfDpa *of_dpa, uint64_t cookie)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    of_dpa_flow_del(of_dpa, flow);

    return ROCKER_OK;
}
static int of_dpa_cmd_flow_get_stats(OfDpa *of_dpa, uint64_t cookie,
                                     struct desc_info *info, char *buf)
{
    OfDpaFlow *flow = of_dpa_flow_find(of_dpa, cookie);
    size_t tlv_size;
    int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000;
    int pos;

    if (!flow) {
        return -ROCKER_ENOENT;
    }

    tlv_size = rocker_tlv_total_size(sizeof(uint32_t)) +  /* duration */
               rocker_tlv_total_size(sizeof(uint64_t)) +  /* rx_pkts */
               rocker_tlv_total_size(sizeof(uint64_t));   /* tx_pkts */

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION,
                        (int32_t)(now - flow->stats.install_time));
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS,
                        flow->stats.rx_pkts);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS,
                        flow->stats.tx_pkts);

    return desc_set_buf(info, tlv_size);
}
static int of_dpa_flow_cmd(OfDpa *of_dpa, struct desc_info *info,
                           char *buf, uint16_t cmd,
                           RockerTlv **flow_tlvs)
{
    uint64_t cookie;

    if (!flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]) {
        return -ROCKER_EINVAL;
    }

    cookie = rocker_tlv_get_le64(flow_tlvs[ROCKER_TLV_OF_DPA_COOKIE]);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
        return of_dpa_cmd_flow_add(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
        return of_dpa_cmd_flow_mod(of_dpa, cookie, flow_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
        return of_dpa_cmd_flow_del(of_dpa, cookie);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_cmd_flow_get_stats(of_dpa, cookie, info, buf);
    }

    return -ROCKER_ENOTSUP;
}
static int of_dpa_cmd_add_l2_interface(OfDpaGroup *group,
                                       RockerTlv **group_tlvs)
{
    if (!group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]) {
        return -ROCKER_EINVAL;
    }

    group->l2_interface.out_pport =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_OUT_PPORT]);
    group->l2_interface.pop_vlan =
        rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_POP_VLAN]);

    return ROCKER_OK;
}
static int of_dpa_cmd_add_l2_rewrite(OfDpa *of_dpa, OfDpaGroup *group,
                                     RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_interface_group;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l2_rewrite.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    l2_interface_group = of_dpa_group_find(of_dpa, group->l2_rewrite.group_id);
    if (!l2_interface_group ||
        ROCKER_GROUP_TYPE_GET(l2_interface_group->id) !=
                              ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) {
        DPRINTF("l2 rewrite group needs a valid l2 interface group\n");
        return -ROCKER_EINVAL;
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l2_rewrite.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l2_rewrite.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l2_rewrite.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l2_rewrite.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l2_rewrite.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
        if (ROCKER_GROUP_VLAN_GET(l2_interface_group->id) !=
            (ntohs(group->l2_rewrite.vlan_id) & VLAN_VID_MASK)) {
            DPRINTF("Set VLAN ID must be same as L2 interface group\n");
            return -ROCKER_EINVAL;
        }
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group,
                                   RockerTlv **group_tlvs)
{
    OfDpaGroup *l2_group;
    RockerTlv **tlvs;
    int err;
    int i;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT] ||
        !group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]) {
        return -ROCKER_EINVAL;
    }

    group->l2_flood.group_count =
        rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]);

    tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1);

    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids =
        g_new0(uint32_t, group->l2_flood.group_count);

    rocker_tlv_parse_nested(tlvs, group->l2_flood.group_count,
                            group_tlvs[ROCKER_TLV_OF_DPA_GROUP_IDS]);

    for (i = 0; i < group->l2_flood.group_count; i++) {
        group->l2_flood.group_ids[i] = rocker_tlv_get_le32(tlvs[i + 1]);
    }

    /* All of the L2 interface groups referenced by the L2 flood
     * group must have same VLAN
     */

    for (i = 0; i < group->l2_flood.group_count; i++) {
        l2_group = of_dpa_group_find(of_dpa, group->l2_flood.group_ids[i]);
        if (!l2_group) {
            continue;
        }
        if ((ROCKER_GROUP_TYPE_GET(l2_group->id) ==
             ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) &&
            (ROCKER_GROUP_VLAN_GET(l2_group->id) !=
             ROCKER_GROUP_VLAN_GET(group->id))) {
            DPRINTF("l2 interface group 0x%08x VLAN doesn't match l2 "
                    "flood group 0x%08x\n",
                    group->l2_flood.group_ids[i], group->id);
            err = -ROCKER_EINVAL;
            goto err_out;
        }
    }

    g_free(tlvs);

    return ROCKER_OK;

err_out:
    group->l2_flood.group_count = 0;
    g_free(group->l2_flood.group_ids);
    group->l2_flood.group_ids = NULL;
    g_free(tlvs);

    return err;
}
static int of_dpa_cmd_add_l3_unicast(OfDpaGroup *group, RockerTlv **group_tlvs)
{
    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]) {
        return -ROCKER_EINVAL;
    }

    group->l3_unicast.group_id =
        rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID_LOWER]);

    if (group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]) {
        memcpy(group->l3_unicast.src_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_SRC_MAC]),
               sizeof(group->l3_unicast.src_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]) {
        memcpy(group->l3_unicast.dst_mac.a,
               rocker_tlv_data(group_tlvs[ROCKER_TLV_OF_DPA_DST_MAC]),
               sizeof(group->l3_unicast.dst_mac.a));
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]) {
        group->l3_unicast.vlan_id =
            rocker_tlv_get_u16(group_tlvs[ROCKER_TLV_OF_DPA_VLAN_ID]);
    }

    if (group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]) {
        group->l3_unicast.ttl_check =
            rocker_tlv_get_u8(group_tlvs[ROCKER_TLV_OF_DPA_TTL_CHECK]);
    }

    return ROCKER_OK;
}
static int of_dpa_cmd_group_do(OfDpa *of_dpa, uint32_t group_id,
                               OfDpaGroup *group, RockerTlv **group_tlvs)
{
    uint8_t type = ROCKER_GROUP_TYPE_GET(group_id);

    switch (type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        return of_dpa_cmd_add_l2_interface(group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        return of_dpa_cmd_add_l2_rewrite(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    /* Treat L2 multicast group same as a L2 flood group */
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        return of_dpa_cmd_add_l2_flood(of_dpa, group, group_tlvs);
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        return of_dpa_cmd_add_l3_unicast(group, group_tlvs);
    }

    return -ROCKER_ENOTSUP;
}
static int of_dpa_cmd_group_add(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);
    int err;

    if (group) {
        return -ROCKER_EEXIST;
    }

    group = of_dpa_group_alloc(group_id);

    err = of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
    if (err) {
        g_free(group);
        return err;
    }

    err = of_dpa_group_add(of_dpa, group);
    if (err) {
        g_free(group);
    }

    return err;
}
static int of_dpa_cmd_group_mod(OfDpa *of_dpa, uint32_t group_id,
                                RockerTlv **group_tlvs)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_cmd_group_do(of_dpa, group_id, group, group_tlvs);
}
static int of_dpa_cmd_group_del(OfDpa *of_dpa, uint32_t group_id)
{
    OfDpaGroup *group = of_dpa_group_find(of_dpa, group_id);

    if (!group) {
        return -ROCKER_ENOENT;
    }

    return of_dpa_group_del(of_dpa, group);
}

static int of_dpa_cmd_group_get_stats(OfDpa *of_dpa, uint32_t group_id,
                                      struct desc_info *info, char *buf)
{
    return -ROCKER_ENOTSUP;
}

static int of_dpa_group_cmd(OfDpa *of_dpa, struct desc_info *info,
                            char *buf, uint16_t cmd, RockerTlv **group_tlvs)
{
    uint32_t group_id;

    if (!group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]) {
        return -ROCKER_EINVAL;
    }

    group_id = rocker_tlv_get_le32(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_ID]);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
        return of_dpa_cmd_group_add(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
        return of_dpa_cmd_group_mod(of_dpa, group_id, group_tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
        return of_dpa_cmd_group_del(of_dpa, group_id);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_cmd_group_get_stats(of_dpa, group_id, info, buf);
    }

    return -ROCKER_ENOTSUP;
}

static int of_dpa_cmd(World *world, struct desc_info *info,
                      char *buf, uint16_t cmd, RockerTlv *cmd_info_tlv)
{
    OfDpa *of_dpa = world_private(world);
    RockerTlv *tlvs[ROCKER_TLV_OF_DPA_MAX + 1];

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_OF_DPA_MAX, cmd_info_tlv);

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
        return of_dpa_flow_cmd(of_dpa, info, buf, cmd, tlvs);
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        return of_dpa_group_cmd(of_dpa, info, buf, cmd, tlvs);
    }

    return -ROCKER_ENOTSUP;
}

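/*
 * For orientation, a sketch (illustrative, not lifted from a real trace)
 * of how a group-add command reaches of_dpa_cmd().  The rocker core has
 * already split the descriptor into a command type and the nested
 * ROCKER_TLV_CMD_INFO payload; of_dpa_cmd() unnests one more level:
 *
 *     cmd          = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD
 *     cmd_info_tlv =
 *         ROCKER_TLV_OF_DPA_GROUP_ID   (le32)  <- required, checked in
 *                                                 of_dpa_group_cmd()
 *         ROCKER_TLV_OF_DPA_OUT_PPORT  (le32)  <- type-specific attrs...
 *
 * of_dpa_group_cmd()/of_dpa_flow_cmd() then pick the attributes they
 * understand out of tlvs[].
 */
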
static gboolean rocker_int64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint rocker_int64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

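/*
 * Added note: rocker_int64_hash() truncates the 64-bit flow cookie to
 * guint.  That is safe for GHashTable use: hash collisions merely extend
 * a bucket chain, and rocker_int64_equal() compares the full 64 bits, so
 * lookups stay correct.
 */
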
static int of_dpa_init(World *world)
{
    OfDpa *of_dpa = world_private(world);

    of_dpa->world = world;

    of_dpa->flow_tbl = g_hash_table_new_full(rocker_int64_hash,
                                             rocker_int64_equal,
                                             NULL, g_free);
    if (!of_dpa->flow_tbl) {
        return -ENOMEM;
    }

    of_dpa->group_tbl = g_hash_table_new_full(g_int_hash, g_int_equal,
                                              NULL, g_free);
    if (!of_dpa->group_tbl) {
        goto err_group_tbl;
    }

    /* XXX hardcode some artificial table max values */
    of_dpa->flow_tbl_max_size = 100;
    of_dpa->group_tbl_max_size = 100;

    return 0;

err_group_tbl:
    g_hash_table_destroy(of_dpa->flow_tbl);

    return -ENOMEM;
}

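/*
 * Added note: the flow table is keyed by the 64-bit flow cookie (hence
 * the custom hash/equal pair above) and the group table by the 32-bit
 * group id via GLib's stock g_int_hash/g_int_equal.  The key appears to
 * point into the value itself, so only a value destructor (g_free) is
 * registered.  Modern GLib aborts instead of returning NULL on
 * allocation failure, so the NULL checks here are purely defensive.
 */
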
static void of_dpa_uninit(World *world)
{
    OfDpa *of_dpa = world_private(world);

    g_hash_table_destroy(of_dpa->group_tbl);
    g_hash_table_destroy(of_dpa->flow_tbl);
}

struct of_dpa_flow_fill_context {
    RockerOfDpaFlowList *list;
    uint32_t tbl_id;
};

static void of_dpa_flow_fill(void *cookie, void *value, void *user_data)
{
    struct of_dpa_flow *flow = value;
    struct of_dpa_flow_key *key = &flow->key;
    struct of_dpa_flow_key *mask = &flow->mask;
    struct of_dpa_flow_fill_context *flow_context = user_data;
    RockerOfDpaFlowList *new;
    RockerOfDpaFlow *nflow;
    RockerOfDpaFlowKey *nkey;
    RockerOfDpaFlowMask *nmask;
    RockerOfDpaFlowAction *naction;

    if (flow_context->tbl_id != -1 &&
        flow_context->tbl_id != key->tbl_id) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    nflow = new->value = g_malloc0(sizeof(*nflow));
    nkey = nflow->key = g_malloc0(sizeof(*nkey));
    nmask = nflow->mask = g_malloc0(sizeof(*nmask));
    naction = nflow->action = g_malloc0(sizeof(*naction));

    nflow->cookie = flow->cookie;
    nflow->hits = flow->stats.hits;
    nkey->priority = flow->priority;
    nkey->tbl_id = key->tbl_id;

    if (key->in_pport || mask->in_pport) {
        nkey->has_in_pport = true;
        nkey->in_pport = key->in_pport;
    }

    if (nkey->has_in_pport && mask->in_pport != 0xffffffff) {
        nmask->has_in_pport = true;
        nmask->in_pport = mask->in_pport;
    }

    if (key->eth.vlan_id || mask->eth.vlan_id) {
        nkey->has_vlan_id = true;
        nkey->vlan_id = ntohs(key->eth.vlan_id);
    }

    if (nkey->has_vlan_id && mask->eth.vlan_id != 0xffff) {
        nmask->has_vlan_id = true;
        nmask->vlan_id = ntohs(mask->eth.vlan_id);
    }

    if (key->tunnel_id || mask->tunnel_id) {
        nkey->has_tunnel_id = true;
        nkey->tunnel_id = key->tunnel_id;
    }

    if (nkey->has_tunnel_id && mask->tunnel_id != 0xffffffff) {
        nmask->has_tunnel_id = true;
        nmask->tunnel_id = mask->tunnel_id;
    }

    if (memcmp(key->eth.src.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.src.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_src = true;
        nkey->eth_src = qemu_mac_strdup_printf(key->eth.src.a);
    }

    if (nkey->has_eth_src && memcmp(mask->eth.src.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_src = true;
        nmask->eth_src = qemu_mac_strdup_printf(mask->eth.src.a);
    }

    if (memcmp(key->eth.dst.a, zero_mac.a, ETH_ALEN) ||
        memcmp(mask->eth.dst.a, zero_mac.a, ETH_ALEN)) {
        nkey->has_eth_dst = true;
        nkey->eth_dst = qemu_mac_strdup_printf(key->eth.dst.a);
    }

    if (nkey->has_eth_dst && memcmp(mask->eth.dst.a, ff_mac.a, ETH_ALEN)) {
        nmask->has_eth_dst = true;
        nmask->eth_dst = qemu_mac_strdup_printf(mask->eth.dst.a);
    }

    if (key->eth.type) {

        nkey->has_eth_type = true;
        nkey->eth_type = ntohs(key->eth.type);

        switch (ntohs(key->eth.type)) {
        case 0x0800:
        case 0x86dd:
            if (key->ip.proto || mask->ip.proto) {
                nkey->has_ip_proto = true;
                nkey->ip_proto = key->ip.proto;
            }
            if (nkey->has_ip_proto && mask->ip.proto != 0xff) {
                nmask->has_ip_proto = true;
                nmask->ip_proto = mask->ip.proto;
            }
            if (key->ip.tos || mask->ip.tos) {
                nkey->has_ip_tos = true;
                nkey->ip_tos = key->ip.tos;
            }
            if (nkey->has_ip_tos && mask->ip.tos != 0xff) {
                nmask->has_ip_tos = true;
                nmask->ip_tos = mask->ip.tos;
            }
            break;
        }

        switch (ntohs(key->eth.type)) {
        case 0x0800:
            if (key->ipv4.addr.dst || mask->ipv4.addr.dst) {
                char *dst = inet_ntoa(*(struct in_addr *)&key->ipv4.addr.dst);
                int dst_len = of_dpa_mask2prefix(mask->ipv4.addr.dst);

                nkey->has_ip_dst = true;
                nkey->ip_dst = g_strdup_printf("%s/%d", dst, dst_len);
            }
            break;
        }
    }

    if (flow->action.goto_tbl) {
        naction->has_goto_tbl = true;
        naction->goto_tbl = flow->action.goto_tbl;
    }

    if (flow->action.write.group_id) {
        naction->has_group_id = true;
        naction->group_id = flow->action.write.group_id;
    }

    if (flow->action.apply.new_vlan_id) {
        naction->has_new_vlan_id = true;
        naction->new_vlan_id = flow->action.apply.new_vlan_id;
    }

    new->next = flow_context->list;
    flow_context->list = new;
}

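/*
 * Added note on the conversion above: a key field is emitted whenever it
 * is nonzero in either key or mask, while the corresponding mask field is
 * emitted only when the mask is not the trivial exact-match value
 * (0xffffffff for in_pport/tunnel_id, 0xffff for vlan_id,
 * ff:ff:ff:ff:ff:ff for MACs, 0xff for IP proto/tos).  Consumers can thus
 * read a missing mask as "exact match".
 */
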
RockerOfDpaFlowList *qmp_query_rocker_of_dpa_flows(const char *name,
                                                   bool has_tbl_id,
                                                   uint32_t tbl_id,
                                                   Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_flow_fill_context fill_context = {
        .list = NULL,
        .tbl_id = has_tbl_id ? tbl_id : -1,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->flow_tbl, of_dpa_flow_fill, &fill_context);

    return fill_context.list;
}

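/*
 * Usage sketch (assumed QMP syntax; "rocker1" is a placeholder device
 * name):
 *
 *     { "execute": "query-rocker-of-dpa-flows",
 *       "arguments": { "name": "rocker1", "tbl-id": 60 } }
 *
 * Omitting "tbl-id" lists flows from every table.
 */
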
struct of_dpa_group_fill_context {
    RockerOfDpaGroupList *list;
    uint8_t type;
};

static void of_dpa_group_fill(void *key, void *value, void *user_data)
{
    struct of_dpa_group *group = value;
    struct of_dpa_group_fill_context *flow_context = user_data;
    RockerOfDpaGroupList *new;
    RockerOfDpaGroup *ngroup;
    struct uint32List *id;
    int i;

    if (flow_context->type != 9 &&
        flow_context->type != ROCKER_GROUP_TYPE_GET(group->id)) {
        return;
    }

    new = g_malloc0(sizeof(*new));
    ngroup = new->value = g_malloc0(sizeof(*ngroup));

    ngroup->id = group->id;

    ngroup->type = ROCKER_GROUP_TYPE_GET(group->id);

    switch (ngroup->type) {
    case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_pport = true;
        ngroup->pport = ROCKER_GROUP_PORT_GET(group->id);
        ngroup->has_out_pport = true;
        ngroup->out_pport = group->l2_interface.out_pport;
        ngroup->has_pop_vlan = true;
        ngroup->pop_vlan = group->l2_interface.pop_vlan;
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l2_rewrite.group_id;
        if (group->l2_rewrite.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l2_rewrite.vlan_id);
        }
        if (memcmp(group->l2_rewrite.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l2_rewrite.src_mac.a);
        }
        if (memcmp(group->l2_rewrite.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l2_rewrite.dst_mac.a);
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
    case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
        ngroup->has_vlan_id = true;
        ngroup->vlan_id = ROCKER_GROUP_VLAN_GET(group->id);
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_GET(group->id);
        for (i = 0; i < group->l2_flood.group_count; i++) {
            ngroup->has_group_ids = true;
            id = g_malloc0(sizeof(*id));
            id->value = group->l2_flood.group_ids[i];
            id->next = ngroup->group_ids;
            ngroup->group_ids = id;
        }
        break;
    case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
        ngroup->has_index = true;
        ngroup->index = ROCKER_GROUP_INDEX_LONG_GET(group->id);
        ngroup->has_group_id = true;
        ngroup->group_id = group->l3_unicast.group_id;
        if (group->l3_unicast.vlan_id) {
            ngroup->has_set_vlan_id = true;
            ngroup->set_vlan_id = ntohs(group->l3_unicast.vlan_id);
        }
        if (memcmp(group->l3_unicast.src_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_src = true;
            ngroup->set_eth_src =
                qemu_mac_strdup_printf(group->l3_unicast.src_mac.a);
        }
        if (memcmp(group->l3_unicast.dst_mac.a, zero_mac.a, ETH_ALEN)) {
            ngroup->has_set_eth_dst = true;
            ngroup->set_eth_dst =
                qemu_mac_strdup_printf(group->l3_unicast.dst_mac.a);
        }
        if (group->l3_unicast.ttl_check) {
            ngroup->has_ttl_check = true;
            ngroup->ttl_check = group->l3_unicast.ttl_check;
        }
        break;
    }

    new->next = flow_context->list;
    flow_context->list = new;
}

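/*
 * Added note: the literal 9 in the type filter above serves as an
 * "any type" sentinel.  The OF-DPA group type lives in a 4-bit field and
 * all defined type values are below 9, so a context type of 9 matches
 * every group.
 */
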
RockerOfDpaGroupList *qmp_query_rocker_of_dpa_groups(const char *name,
                                                     bool has_type,
                                                     uint8_t type,
                                                     Error **errp)
{
    struct rocker *r;
    struct world *w;
    struct of_dpa *of_dpa;
    struct of_dpa_group_fill_context fill_context = {
        .list = NULL,
        .type = has_type ? type : 9,
    };

    r = rocker_find(name);
    if (!r) {
        error_setg(errp, "rocker %s not found", name);
        return NULL;
    }

    w = rocker_get_world(r, ROCKER_WORLD_TYPE_OF_DPA);
    if (!w) {
        error_setg(errp, "rocker %s doesn't have OF-DPA world", name);
        return NULL;
    }

    of_dpa = world_private(w);

    g_hash_table_foreach(of_dpa->group_tbl, of_dpa_group_fill, &fill_context);

    return fill_context.list;
}

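/*
 * Usage sketch (assumed QMP syntax; "rocker1" is a placeholder):
 *
 *     { "execute": "query-rocker-of-dpa-groups",
 *       "arguments": { "name": "rocker1", "type": 0 } }
 *
 * "type" filters on the OF-DPA group type (0 = L2 interface) and may be
 * omitted to list groups of all types.
 */
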
static WorldOps of_dpa_ops = {
    .name = "ofdpa",
    .init = of_dpa_init,
    .uninit = of_dpa_uninit,
    .ig = of_dpa_ig,
    .cmd = of_dpa_cmd,
};

World *of_dpa_world_alloc(Rocker *r)
{
    return world_alloc(r, sizeof(OfDpa), ROCKER_WORLD_TYPE_OF_DPA, &of_dpa_ops);
}