/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

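/* Descriptive note (not from the original source): HZ / 2 means the deferred
 * stats worker re-arms itself roughly every 500 ms; see
 * ch_flower_stats_handler() and cxgb4_init_tc_flower() below.
 */
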
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

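/* Illustrative sketch (not from the original source): each entry above maps a
 * TC pedit (header type, word offset) pair onto a byte range inside struct
 * ch_filter_specification, and offload_pedit() below consumes it through the
 * .field/.size/.offset members.  Assuming PEDIT_FIELDS() pastes its first two
 * arguments into one of the PEDIT_* field ids, the IPv4 source entry would
 * behave roughly like:
 *
 *	{ .field = IP4_SRC, .size = 4,
 *	  .offset = offsetof(struct ch_filter_specification, nat_fip) + 0 }
 *
 * i.e. a pedit rewrite of the IPv4 source address is stored as a 4-byte NAT
 * value in fs->nat_fip.
 */
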
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);

		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		fs->val.lport = cpu_to_be16(key->dst);
		fs->mask.lport = cpu_to_be16(mask->dst);
		fs->val.fport = cpu_to_be16(key->src);
		fs->mask.fport = cpu_to_be16(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = cpu_to_be16(key->dst);
		fs->nat_fport = cpu_to_be16(key->src);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		/* Chelsio adapters use ivlan_vld bit to match vlan packets
		 * as 802.1Q. Also, when vlan tag is present in packets,
		 * ethtype match is used then to match on ethtype of inner
		 * header ie. the header following the vlan header.
		 * So, set the ivlan_vld based on ethtype info supplied by
		 * TC for vlan packets if its 802.1Q. And then reset the
		 * ethtype value else, hw will try to match the supplied
		 * ethtype value with ethtype of inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

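/* Illustrative example (not from the original source): a flower rule such as
 *
 *	tc filter add dev <port> ingress protocol ip flower \
 *		dst_ip 192.0.2.1 ip_proto tcp dst_port 80 action drop
 *
 * would be parsed by cxgb4_process_flow_match() above roughly as
 * fs->val.lip = 192.0.2.1, fs->val.proto = IPPROTO_TCP, fs->val.lport = 80
 * and fs->val.iport = <ingress port id>, with the corresponding mask fields
 * taken from the TC-supplied mask.
 */
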
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);

		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

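/* Illustrative walk-through (not from the original source): for field ==
 * IP4_SRC, the loop above finds the PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0)
 * entry, so the 4 bytes being rewritten (val & ~mask, since pedit mask bits
 * mark what is kept) are copied into fs->nat_fip, i.e. the NAT source
 * address the hardware will write into matching packets.
 */
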
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}

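/* Illustrative example (not from the original source): a TC action such as
 *
 *	action pedit ex munge ip dst set 198.51.100.1
 *
 * arrives here as htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 with offset
 * PEDIT_IP4_DST, so the new address is written into fs->nat_lip through
 * offload_pedit() and the filter is switched to NAT_MODE_ALL so the hardware
 * performs the rewrite on matching packets.
 */
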
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}

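/* Summary of the action mapping above (illustrative, not from the original
 * source): gact "ok" becomes FILTER_PASS, "drop" becomes FILTER_DROP, a
 * mirred egress redirect becomes FILTER_SWITCH with fs->eport set to the
 * target port, vlan push/pop/modify set fs->newvlan, and each pedit key is
 * folded into the filter via process_pedit_field().  For example,
 * "action mirred egress redirect dev <port>" alone turns the filter into a
 * pure hardware switching rule between two ports of the same adapter.
 */
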
static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

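/* Worked example (illustrative, not from the original source): callers pass
 * in ~mask, i.e. the bits a pedit key actually rewrites.  A key that rewrites
 * only the destination port yields ~mask == 0x0000ffff (lo set, hi clear) and
 * is accepted, while a single 32-bit key that rewrites both ports yields
 * ~mask == 0xffffffff and is rejected here.
 */
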
static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}

static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}

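/* Constraint illustrated (not from the original source): header rewrites only
 * make sense when the packet is also switched out of an egress port, so an
 * action list like "action vlan push id 10" alone is rejected with the
 * message above, whereas "action vlan push id 10 pipe action mirred egress
 * redirect dev <port>" passes validation.
 */
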
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

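/* Descriptive note (not from the original source): when
 * is_filter_exact_match() reports that every field of the rule is an exact
 * match, fs->hash is set and the filter is created in the hash region (the
 * fidx of 0 is only a placeholder on that path); otherwise a free LE-TCAM
 * index is reserved with cxgb4_get_free_ftid() before the filter work
 * request is issued.
 */
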
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);
	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

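/* Descriptive note (not from the original source): this worker only refreshes
 * prev_packet_count and last_used every STATS_CHECK_PERIOD, so the "used"
 * timestamp reported to TC stays meaningful even if user space polls rarely;
 * the full byte/packet deltas are pushed to TC in cxgb4_tc_flower_stats()
 * below when the classifier is dumped.
 */
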
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.automatic_shrinking = true,
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
}