/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
#define STATS_CHECK_PERIOD (HZ / 2)
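
/* Map from TC pedit (header type, field offset) pairs to the byte offset
 * and width of the corresponding rewrite field inside struct
 * ch_filter_specification, so offload_pedit() below can copy pedit
 * values straight into the filter specification.
 */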
static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	/* kzalloc() can fail; only initialize the lock on success */
	if (new)
		spin_lock_init(&new->lock);
	return new;
}
/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}
static void cxgb4_process_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  cls->key);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		u16 ethtype_key = ntohs(key->n_proto);
		u16 ethtype_mask = ntohs(mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = key->ip_proto;
		fs->mask.proto = mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  cls->mask);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  cls->mask);

		fs->type = 1;
		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_PORTS,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_PORTS,
						 cls->mask);
		/* Dissector ports are network order; the filter
		 * specification expects host order.
		 */
		fs->val.lport = be16_to_cpu(key->dst);
		fs->mask.lport = be16_to_cpu(mask->dst);
		fs->val.fport = be16_to_cpu(key->src);
		fs->mask.fport = be16_to_cpu(mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = be16_to_cpu(key->dst);
		fs->nat_fport = be16_to_cpu(key->src);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key, *mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_IP,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		fs->val.tos = key->tos;
		fs->mask.tos = mask->tos;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;
		u16 vlan_tci, vlan_tci_mask;

		key = skb_flow_dissector_target(cls->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						cls->key);
		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 cls->mask);
		vlan_tci = key->vlan_id | (key->vlan_priority <<
					   VLAN_PRIO_SHIFT);
		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		/* Chelsio adapters use the ivlan_vld bit to match VLAN
		 * (802.1Q) packets. When a VLAN tag is present, the
		 * ethtype match is applied to the ethertype of the inner
		 * header, i.e. the header following the VLAN header.
		 * So, if TC supplied ethtype 802.1Q, set ivlan_vld and
		 * then clear the ethtype value; otherwise, the hardware
		 * would try to match the supplied ethtype against the
		 * ethertype of the inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ivlan_vld = 1;
			fs->mask.ivlan_vld = 1;
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}
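
/* Illustrative example (placeholders, not from this driver): a rule like
 *
 *   tc filter add dev <port> parent ffff: protocol ip flower \
 *       dst_ip 10.0.0.1 ip_proto tcp dst_port 80 action drop
 *
 * arrives here as dissector keys (BASIC, IPV4_ADDRS, PORTS), which
 * cxgb4_process_flow_match() translates into the val/mask pairs of
 * struct ch_filter_specification above.
 */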
static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct tc_cls_flower_offload *cls)
{
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (cls->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    cls->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(cls->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  cls->mask);
		ethtype_key = ntohs(key->n_proto);
		ethtype_mask = ntohs(mask->n_proto);
	}

	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_dissector_key_ip *mask;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		mask = skb_flow_dissector_target(cls->dissector,
						 FLOW_DISSECTOR_KEY_IP,
						 cls->mask);
		if (mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}
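
/* Note on pedit SET semantics: mask bits that are 1 select bits to
 * preserve in the packet, so set_val = val & ~mask in offload_pedit()
 * above extracts just the bits being rewritten. The pedits[] table
 * supplies where those bytes land inside struct ch_filter_specification
 * (e.g. IP4_DST maps to nat_lip).
 */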
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype)
{
	switch (htype) {
	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      TCP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), TCP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
		break;
	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				offload_pedit(fs, cpu_to_be32(val) >> 16,
					      cpu_to_be32(mask) >> 16,
					      UDP_SPORT);
			else
				offload_pedit(fs, cpu_to_be32(val),
					      cpu_to_be32(mask), UDP_DPORT);
		}
		fs->nat_mode = NAT_MODE_ALL;
	}
}
static void cxgb4_process_flow_actions(struct net_device *in,
				       struct tc_cls_flower_offload *cls,
				       struct ch_filter_specification *fs)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			fs->action = FILTER_PASS;
		} else if (is_tcf_gact_shot(a)) {
			fs->action = FILTER_DROP;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out = tcf_mirred_dev(a);
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
		} else if (is_tcf_vlan(a)) {
			u32 vlan_action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case TCA_VLAN_ACT_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case TCA_VLAN_ACT_MODIFY:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
		} else if (is_tcf_pedit(a)) {
			u32 mask, val, offset;
			int nkeys, i;
			u8 htype;

			nkeys = tcf_pedit_nkeys(a);
			for (i = 0; i < nkeys; i++) {
				htype = tcf_pedit_htype(a, i);
				mask = tcf_pedit_mask(a, i);
				val = tcf_pedit_val(a, i);
				offset = tcf_pedit_offset(a, i);

				process_pedit_field(fs, val, mask, offset,
						    htype);
			}
		}
	}
}
static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return !(hi && lo);
}
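
/* Example: a pedit key rewriting only one L4 port leaves only one
 * 16-bit half set in the inverted mask the callers pass in, e.g.
 * ~mask == 0xffff0000, which passes this check; ~mask == 0xffffffff
 * (both ports rewritten in one key) fails it.
 */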
static bool valid_pedit_action(struct net_device *dev,
			       const struct tc_action *a)
{
	u32 mask, offset;
	u8 cmd, htype;
	int nkeys, i;

	nkeys = tcf_pedit_nkeys(a);
	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		mask = tcf_pedit_mask(a, i);
		offset = tcf_pedit_offset(a, i);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
			netdev_err(dev, "%s: Unsupported pedit cmd\n",
				   __func__);
			return false;
		}

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			switch (offset) {
			case PEDIT_ETH_DMAC_31_0:
			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			case PEDIT_ETH_SMAC_47_16:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			switch (offset) {
			case PEDIT_IP4_SRC:
			case PEDIT_IP4_DST:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			switch (offset) {
			case PEDIT_IP6_SRC_31_0:
			case PEDIT_IP6_SRC_63_32:
			case PEDIT_IP6_SRC_95_64:
			case PEDIT_IP6_SRC_127_96:
			case PEDIT_IP6_DST_31_0:
			case PEDIT_IP6_DST_63_32:
			case PEDIT_IP6_DST_95_64:
			case PEDIT_IP6_DST_127_96:
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			switch (offset) {
			case PEDIT_TCP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			switch (offset) {
			case PEDIT_UDP_SPORT_DPORT:
				if (!valid_l4_mask(~mask)) {
					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
						   __func__);
					return false;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported pedit field\n",
					   __func__);
				return false;
			}
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit type\n",
				   __func__);
			return false;
		}
	}
	return true;
}
static int cxgb4_validate_flow_actions(struct net_device *dev,
				       struct tc_cls_flower_offload *cls)
{
	const struct tc_action *a;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	LIST_HEAD(actions);

	tcf_exts_to_list(cls->exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			/* Do nothing */
		} else if (is_tcf_gact_shot(a)) {
			/* Do nothing */
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct adapter *adap = netdev2adap(dev);
			struct net_device *n_dev, *target_dev;
			unsigned int i;
			bool found = false;

			target_dev = tcf_mirred_dev(a);
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 vlan_action = tcf_vlan_action(a);

			switch (vlan_action) {
			case TCA_VLAN_ACT_POP:
				break;
			case TCA_VLAN_ACT_PUSH:
			case TCA_VLAN_ACT_MODIFY:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
		} else if (is_tcf_pedit(a)) {
			bool pedit_valid = valid_pedit_action(dev, a);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
		} else {
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	return 0;
}
int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	struct filter_ctx ctx;
	int fidx;
	int ret;

	if (cxgb4_validate_flow_actions(dev, cls))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, cls))
		return -EOPNOTSUPP;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	cxgb4_process_flow_match(dev, cls, fs);
	cxgb4_process_flow_actions(dev, cls, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	if (fs->hash) {
		fidx = 0;
	} else {
		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
		if (fidx < 0) {
			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
			ret = -ENOMEM;
			goto free_entry;
		}
	}

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto free_entry;
	}

	ret = ctx.result;
	/* Check if hw returned error for filter creation */
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		goto free_entry;
	}

	ch_flower->tc_flower_cookie = cls->cookie;
	ch_flower->filter_id = ctx.tid;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}
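
/* Illustrative end-to-end example (device names and addresses are
 * placeholders, not from this driver):
 *
 *   tc filter add dev <port0> parent ffff: protocol ip flower \
 *       dst_ip 10.0.0.1 ip_proto tcp \
 *       action pedit ex munge ip dst set 192.168.0.1 pipe \
 *       action mirred egress redirect dev <port1>
 *
 * The pedit+mirred combination satisfies cxgb4_validate_flow_actions()
 * (rewrites require an egress redirect) and results in a switch filter
 * with nat_mode set, created via __cxgb4_set_filter() above.
 */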
int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
	if (ret)
		goto err;

	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret) {
		netdev_err(dev, "Flow remove from rhashtable failed");
		goto err;
	}
	kfree_rcu(ch_flower, rcu);

err:
	return ret;
}
static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}
static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}
int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct tc_cls_flower_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
				      packets - ofld_stats->packet_count,
				      ofld_stats->last_used);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}
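
/* The flower table is keyed by the TC cookie, so the replace/destroy/
 * stats callbacks can map a tc_cls_flower_offload request back to its
 * hardware filter id via ch_flower_lookup().
 */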
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};
int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	return 0;
}
void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
}