/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)
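/* The flow stats worker below re-arms its timer every STATS_CHECK_PERIOD
 * jiffies, i.e. roughly every 500 ms, to keep the last-used timestamps of
 * offloaded flows fresh.
 */
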
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

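/* Each entry above maps a pedit field id to the byte width and offset of the
 * corresponding rewrite field inside struct ch_filter_specification (for
 * example, ETH_DMAC_31_0 is a 4-byte write at offset 0 of dmac).
 * offload_pedit() walks this table to copy the masked pedit value into the
 * right place in the filter spec.
 */
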
static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
	/* Default supported NAT modes */
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_NONE,
		.natmode = NAT_MODE_NONE,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP,
		.natmode = NAT_MODE_DIP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
		.natmode = NAT_MODE_DIP_DP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_DIP_DP_SIP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_DP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_SIP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_SIP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
			 CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_ALL,
	},
	/* T6+ can ignore L4 ports when they're disabled. */
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_SIP_SP,
	},
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_DP_SP,
	},
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_ALL,
	},
};

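/* Worked example of the table above: a rule that rewrites only the
 * destination IP and destination port accumulates
 * CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT and maps to
 * NAT_MODE_DIP_DP. A combination with no matching entry for the chip (for
 * instance rewriting both IPs but no ports on a pre-T6 adapter) is rejected
 * by cxgb4_action_natmode_validate(); T6 and later add the relaxed entries
 * at the end of the table for such cases.
 */
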
static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
				       u8 natmode_flags)
{
	u8 i = 0;

	/* Translate the enabled NAT 4-tuple fields to one of the
	 * hardware supported NAT mode configurations. This ensures
	 * that we pick a valid combination, where the disabled fields
	 * do not get overwritten to 0.
	 */
	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
		if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
			fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
			return;
		}
	}
}

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_rule *rule,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * packets as 802.1Q. Also, when a VLAN tag is present in a
		 * packet, the ethtype match applies to the ethtype of the
		 * inner header, i.e. the header following the VLAN header.
		 * So, set ivlan_vld based on the ethtype info supplied by
		 * TC for VLAN packets if it is 802.1Q, and then reset the
		 * ethtype value; otherwise, hw will try to match the
		 * supplied ethtype value against the ethtype of the inner
		 * header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

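/* Illustrative example (not an exhaustive list of the supported keys): a
 * rule such as
 *
 *   tc filter add dev ethX ingress protocol ip flower \
 *       ip_proto tcp dst_ip 10.0.0.1 dst_port 80 action drop
 *
 * populates fs->val.proto, fs->val.lip and fs->val.lport (plus the matching
 * masks), with the nat_* fields seeded to the same values in case a later
 * pedit action rewrites them.
 */
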
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_rule *rule)
{
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype,
				u8 *natmode_flags)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
				fs->nat_fport = val;
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			} else {
				fs->nat_lport = val >> 16;
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			}
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
				fs->nat_fport = val;
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			} else {
				fs->nat_lport = val >> 16;
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			}
		}
		break;
	}
}

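/* Note on the TCP/UDP cases above: a single pedit word at the start of the
 * L4 header covers both the source and destination port. The driver uses the
 * enabled bits of the mask to decide which of the two is being rewritten;
 * rewriting both ports with one filter is not supported and is rejected in
 * valid_pedit_action() via valid_l4_mask().
 */
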
static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
					 struct netlink_ext_ack *extack)
{
	u8 i = 0;

	/* Extract the NAT mode to enable based on what 4-tuple fields
	 * are enabled to be overwritten. This ensures that the
	 * disabled fields don't get overwritten to 0.
	 */
	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
		const struct cxgb4_natmode_config *c;

		c = &cxgb4_natmode_config_array[i];
		if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
		    natmode_flags == c->flags)
			return 0;
	}
	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
	return -EOPNOTSUPP;
}

void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	u8 natmode_flags = 0;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype,
					    &natmode_flags);
			}
			break;
		case FLOW_ACTION_QUEUE:
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = act->queue.index;
			break;
		default:
			break;
		}
	}
	if (natmode_flags)
		cxgb4_action_natmode_tweak(fs, natmode_flags);
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

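/* For example, valid_l4_mask(0x0000ffff) and valid_l4_mask(0xffff0000) are
 * both true (only one port selected), while valid_l4_mask(0xffffffff) is
 * false. Callers pass ~mask, i.e. the bits the pedit action actually
 * rewrites.
 */
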
static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act,
			       u8 *natmode_flags)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP4_DST:
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			else
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			else
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack,
				u8 matchall_filter)
{
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	u8 natmode_flags = 0;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			if (act->id == FLOW_ACTION_MIRRED &&
			    !matchall_filter) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Egress mirror action is only supported for tc-matchall");
				return -EOPNOTSUPP;
			}

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act,
							      &natmode_flags);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		case FLOW_ACTION_QUEUE:
			/* Do nothing. cxgb4_set_filter will validate */
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	if (act_pedit) {
		int ret;

		ret = cxgb4_action_natmode_validate(adap, natmode_flags,
						    extack);
		if (ret)
			return ret;
	}

	return 0;
}

static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

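/* The helper above and the one below track the highest tc priority currently
 * used by filters placed in the hash (exact-match) region. This bookkeeping
 * is presumably consumed when a free filter index is picked (see
 * cxgb4_get_free_ftid()) to avoid priority conflicts between hash and
 * LE-TCAM filters, so it must be kept in sync on every hash-filter add and
 * delete.
 */
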
static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct adapter *adap = netdev2adap(dev);
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into HASH region, then ignore
	 * the index to normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		return ret;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	/* Check if hw returned error for filter creation */
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

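/* The replace path above is synchronous from the caller's point of view:
 * __cxgb4_set_filter() posts the request to hardware and the completion is
 * waited on for up to 10 seconds (10 * HZ) before the result in ctx is
 * checked and the new filter tid is handed back to the caller.
 */
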
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	u8 hash;
	int ret;

	hash = fs->hash;

	ret = cxgb4_del_filter(dev, tid, fs);
	if (ret)
		return ret;

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

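/* The periodic worker above only refreshes the last-used timestamp of each
 * flow; the byte/packet deltas are computed and reported to TC in
 * cxgb4_tc_flower_stats() below.
 */
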
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count, 0,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

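/* The flower table is keyed by the TC cookie (tc_flower_cookie), so the
 * destroy and stats callbacks can look up the driver-side state for a rule
 * directly from the flow_cls_offload they receive.
 */
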
int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return ret;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}