/*
 * Broadcom Starfighter 2 DSA switch CFP support
 *
 * Copyright (C) 2016, Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/list.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/bitmap.h>
#include <net/flow_offload.h>

#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

struct cfp_udf_slice_layout {
	u8 slices[UDFS_PER_SLICE];
	u32 mask_value;
	u32 base_offset;
};

struct cfp_udf_layout {
	struct cfp_udf_slice_layout udfs[UDF_NUM_SLICES];
};
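/* An all-zero slice layout marks an unused slice; it is what
 * bcm_sf2_get_slice_number() skips over when hunting for a usable
 * slice.
 */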
static const u8 zero_slice[UDFS_PER_SLICE] = { };
/* UDF slices layout for a TCPv4/UDPv4 specification */
static const struct cfp_udf_layout udf_tcpip4_layout = {
	.udfs = {
		[1] = {
			.slices = {
				/* End of L2, byte offset 12, src IP[0:15] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[16:31] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, dst IP[0:15] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, dst IP[16:31] */
				CFG_UDF_EOL2 | 9,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
				0, 0, 0
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_A_0_8_PORT_0 + UDF_SLICE_OFFSET,
		},
	},
};
/* UDF slices layout for a TCPv6/UDPv6 specification */
static const struct cfp_udf_layout udf_tcpip6_layout = {
	.udfs = {
		[0] = {
			.slices = {
				/* End of L2, byte offset 8, src IP[0:15] */
				CFG_UDF_EOL2 | 4,
				/* End of L2, byte offset 10, src IP[16:31] */
				CFG_UDF_EOL2 | 5,
				/* End of L2, byte offset 12, src IP[32:47] */
				CFG_UDF_EOL2 | 6,
				/* End of L2, byte offset 14, src IP[48:63] */
				CFG_UDF_EOL2 | 7,
				/* End of L2, byte offset 16, src IP[64:79] */
				CFG_UDF_EOL2 | 8,
				/* End of L2, byte offset 18, src IP[80:95] */
				CFG_UDF_EOL2 | 9,
				/* End of L2, byte offset 20, src IP[96:111] */
				CFG_UDF_EOL2 | 10,
				/* End of L2, byte offset 22, src IP[112:127] */
				CFG_UDF_EOL2 | 11,
				/* End of L3, byte offset 0, src port */
				CFG_UDF_EOL3 | 0,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_B_0_8_PORT_0,
		},
		[3] = {
			.slices = {
				/* End of L2, byte offset 24, dst IP[0:15] */
				CFG_UDF_EOL2 | 12,
				/* End of L2, byte offset 26, dst IP[16:31] */
				CFG_UDF_EOL2 | 13,
				/* End of L2, byte offset 28, dst IP[32:47] */
				CFG_UDF_EOL2 | 14,
				/* End of L2, byte offset 30, dst IP[48:63] */
				CFG_UDF_EOL2 | 15,
				/* End of L2, byte offset 32, dst IP[64:79] */
				CFG_UDF_EOL2 | 16,
				/* End of L2, byte offset 34, dst IP[80:95] */
				CFG_UDF_EOL2 | 17,
				/* End of L2, byte offset 36, dst IP[96:111] */
				CFG_UDF_EOL2 | 18,
				/* End of L2, byte offset 38, dst IP[112:127] */
				CFG_UDF_EOL2 | 19,
				/* End of L3, byte offset 2, dst port */
				CFG_UDF_EOL3 | 1,
			},
			.mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
			.base_offset = CORE_UDF_0_D_0_11_PORT_0,
		},
	},
};
static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
{
	unsigned int i, count = 0;

	for (i = 0; i < UDFS_PER_SLICE; i++) {
		if (layout[i] != 0)
			count++;
	}

	return count;
}
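/* There are nine UDF valid bits per slice, split across two register
 * fields: udf_upper_bits() builds UDF_Valid[8] and udf_lower_bits()
 * builds UDF_Valid[7:0] for the first num_udf programmed UDFs.
 */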
static inline u32 udf_upper_bits(unsigned int num_udf)
{
	return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
}

static inline u32 udf_lower_bits(unsigned int num_udf)
{
	return (u8)GENMASK(num_udf - 1, 0);
}
static unsigned int bcm_sf2_get_slice_number(const struct cfp_udf_layout *l,
					     unsigned int start)
{
	const struct cfp_udf_slice_layout *slice_layout;
	unsigned int slice_idx;

	for (slice_idx = start; slice_idx < UDF_NUM_SLICES; slice_idx++) {
		slice_layout = &l->udfs[slice_idx];
		if (memcmp(slice_layout->slices, zero_slice,
			   sizeof(zero_slice)))
			break;
	}

	return slice_idx;
}
static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
				const struct cfp_udf_layout *layout,
				unsigned int slice_num)
{
	u32 offset = layout->udfs[slice_num].base_offset;
	unsigned int i;

	for (i = 0; i < UDFS_PER_SLICE; i++)
		core_writel(priv, layout->udfs[slice_num].slices[i],
			    offset + i * 4);
}
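/* Kick off a CFP RAM operation and busy-wait (bounded by a retry
 * count) for the switch to clear OP_STR_DONE, signalling completion.
 */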
static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
	reg |= OP_STR_DONE | op;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & OP_STR_DONE))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
					     unsigned int addr)
{
	u32 reg;

	WARN_ON(addr >= priv->num_cfp_rules);

	reg = core_readl(priv, CORE_CFP_ACC);
	reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
	reg |= addr << XCESS_ADDR_SHIFT;
	core_writel(priv, reg, CORE_CFP_ACC);
}
static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
{
	/* Entry #0 is reserved */
	return priv->num_cfp_rules - 1;
}
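/* Program the Action and Policer RAM entries backing @rule_index:
 * steer matching packets to @port_num/@queue_num (when
 * @fwd_map_change is set) and leave the rate meter disabled.
 */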
static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
				   unsigned int rule_index,
				   int src_port,
				   unsigned int port_num,
				   unsigned int queue_num,
				   bool fwd_map_change)
{
	int ret;
	u32 reg;

	/* Replace ARL derived destination with DST_MAP derived, define
	 * which port and queue this should be forwarded to.
	 */
	if (fwd_map_change)
		reg = CHANGE_FWRD_MAP_IB_REP_ARL |
		      BIT(port_num + DST_MAP_IB_SHIFT) |
		      CHANGE_TC | queue_num << NEW_TC_SHIFT;
	else
		reg = 0;

	/* Enable looping back to the original port */
	if (src_port == port_num)
		reg |= LOOP_BK_EN;

	core_writel(priv, reg, CORE_ACT_POL_DATA0);

	/* Set classification ID that needs to be put in Broadcom tag */
	core_writel(priv, rule_index << CHAIN_ID_SHIFT, CORE_ACT_POL_DATA1);

	core_writel(priv, 0, CORE_ACT_POL_DATA2);

	/* Configure policer RAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
	if (ret) {
		pr_err("Policer entry at %d failed\n", rule_index);
		return ret;
	}

	/* Disable the policer */
	core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);

	/* Now the rate meter */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
	if (ret) {
		pr_err("Meter entry at %d failed\n", rule_index);
		return ret;
	}

	return 0;
}
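/* Spread an IPv4 source/destination address pair plus L4 ports across
 * the five CORE_CFP_{DATA,MASK}_PORT(4..0) words of one slice,
 * following the UDF_n_A layout documented word by word below.
 */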
static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
				   struct flow_dissector_key_ipv4_addrs *addrs,
				   struct flow_dissector_key_ports *ports,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, offset;

	/* C-Tag		[31:24]
	 * UDF_n_A8		[23:8]
	 * UDF_n_A7		[7:0]
	 */
	reg = 0;
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, reg, offset);

	/* UDF_n_A7		[31:24]
	 * UDF_n_A6		[23:8]
	 * UDF_n_A5		[7:0]
	 */
	reg = be16_to_cpu(ports->dst) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, reg, offset);

	/* UDF_n_A5		[31:24]
	 * UDF_n_A4		[23:8]
	 * UDF_n_A3		[7:0]
	 */
	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
	      (u32)be16_to_cpu(ports->src) << 8 |
	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, reg, offset);

	/* UDF_n_A3		[31:24]
	 * UDF_n_A2		[23:8]
	 * UDF_n_A1		[7:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, reg, offset);

	/* UDF_n_A1		[31:24]
	 * UDF_n_A0		[23:8]
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	const struct cfp_udf_layout *layout;
	unsigned int slice_num, rule_index;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv4_addrs ipv4;
	struct flow_match_ports ports;
	struct flow_match_ip ip;
	u8 ip_proto, ip_frag;
	u8 num_udf;
	u32 reg;
	int ret;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V4_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	/* Locate the first rule available */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index = find_first_zero_bit(priv->cfp.used,
						 priv->num_cfp_rules);
	else
		rule_index = fs->location;

	if (rule_index > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
	flow_rule_match_ports(flow->rule, &ports);
	flow_rule_match_ip(flow->rule, &ip);

	layout = &udf_tcpip4_layout;
	/* We only use one UDF slice for now */
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
		    udf_upper_bits(num_udf),
		    CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv4 packets */
	core_writel(priv, layout->udfs[slice_num].mask_value |
		    udf_upper_bits(num_udf), CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Program the match and the mask */
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the rule as being used and return it */
	set_bit(rule_index, priv->cfp.used);
	set_bit(rule_index, priv->cfp.unique);
	fs->location = rule_index;

	return 0;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
	return ret;
}
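/* Program one half of an IPv6 match (a 128-bit address plus one L4
 * port) into the CORE_CFP_{DATA,MASK}_PORT(4..0) words of a slice;
 * called once for the source half and once for the destination half
 * of a rule.
 */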
static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
				   const __be32 *ip6_addr, const __be16 port,
				   unsigned int slice_num,
				   bool mask)
{
	u32 reg, tmp, val, offset;

	/* C-Tag		[31:24]
	 * UDF_n_B8		[23:8]	(port)
	 * UDF_n_B7 (upper)	[7:0]	(addr[15:8])
	 */
	reg = be32_to_cpu(ip6_addr[3]);
	val = (u32)be16_to_cpu(port) << 8 | ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(4);
	else
		offset = CORE_CFP_DATA_PORT(4);
	core_writel(priv, val, offset);

	/* UDF_n_B7 (lower)	[31:24]	(addr[7:0])
	 * UDF_n_B6		[23:8]	(addr[31:16])
	 * UDF_n_B5 (upper)	[7:0]	(addr[47:40])
	 */
	tmp = be32_to_cpu(ip6_addr[2]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(3);
	else
		offset = CORE_CFP_DATA_PORT(3);
	core_writel(priv, val, offset);

	/* UDF_n_B5 (lower)	[31:24]	(addr[39:32])
	 * UDF_n_B4		[23:8]	(addr[63:48])
	 * UDF_n_B3 (upper)	[7:0]	(addr[79:72])
	 */
	reg = be32_to_cpu(ip6_addr[1]);
	val = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      ((reg >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(2);
	else
		offset = CORE_CFP_DATA_PORT(2);
	core_writel(priv, val, offset);

	/* UDF_n_B3 (lower)	[31:24]	(addr[71:64])
	 * UDF_n_B2		[23:8]	(addr[95:80])
	 * UDF_n_B1 (upper)	[7:0]	(addr[111:104])
	 */
	tmp = be32_to_cpu(ip6_addr[0]);
	val = (u32)(reg & 0xff) << 24 | (u32)(reg >> 16) << 8 |
	      ((tmp >> 8) & 0xff);
	if (mask)
		offset = CORE_CFP_MASK_PORT(1);
	else
		offset = CORE_CFP_DATA_PORT(1);
	core_writel(priv, val, offset);

	/* UDF_n_B1 (lower)	[31:24]	(addr[103:96])
	 * UDF_n_B0		[23:8]	(addr[127:112])
	 * Reserved		[7:4]
	 * Slice ID		[3:2]
	 * Slice valid		[1:0]
	 */
	reg = (u32)(tmp & 0xff) << 24 | (u32)(tmp >> 16) << 8 |
	      SLICE_NUM(slice_num) | SLICE_VALID;
	if (mask)
		offset = CORE_CFP_MASK_PORT(0);
	else
		offset = CORE_CFP_DATA_PORT(0);
	core_writel(priv, reg, offset);
}
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule = NULL;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		if (rule->port == port && rule->fs.location == location)
			break;
	}

	return rule;
}
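/* Return 0 when a rule identical to @fs (same port, flow type, ring
 * cookie and match/mask tuple) already exists, non-zero otherwise.
 */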
static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct cfp_rule *rule = NULL;
	size_t fs_size = 0;
	int ret = 1;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = 1;
		if (rule->port != port)
			continue;

		if (rule->fs.flow_type != fs->flow_type ||
		    rule->fs.ring_cookie != fs->ring_cookie ||
		    rule->fs.m_ext.data[0] != fs->m_ext.data[0])
			continue;

		switch (fs->flow_type & ~FLOW_EXT) {
		case TCP_V6_FLOW:
		case UDP_V6_FLOW:
			fs_size = sizeof(struct ethtool_tcpip6_spec);
			break;
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
			fs_size = sizeof(struct ethtool_tcpip4_spec);
			break;
		default:
			continue;
		}

		ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
		ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
		if (ret == 0)
			break;
	}

	return ret;
}
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
				     unsigned int port_num,
				     unsigned int queue_num,
				     struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = {};
	unsigned int slice_num, rule_index[2];
	const struct cfp_udf_layout *layout;
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_ipv6_addrs ipv6;
	struct flow_match_ports ports;
	u8 ip_proto, ip_frag;
	int ret = 0;
	u8 num_udf;
	u32 reg;

	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V6_FLOW:
		ip_proto = IPPROTO_TCP;
		break;
	case UDP_V6_FLOW:
		ip_proto = IPPROTO_UDP;
		break;
	default:
		return -EINVAL;
	}

	ip_frag = be32_to_cpu(fs->m_ext.data[0]);

	layout = &udf_tcpip6_layout;
	slice_num = bcm_sf2_get_slice_number(layout, 0);
	if (slice_num == UDF_NUM_SLICES)
		return -EINVAL;

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Negotiate two indexes, one for the second half which we are chained
	 * from, which is what we will return to user-space, and a second one
	 * which is used to store its first half. That first half does not
	 * allow any choice of placement, so it just needs to find the next
	 * available bit. We return the second half as fs->location because
	 * that helps with the rule lookup later on since the second half is
	 * chained from its first half, we can easily identify IPv6 CFP rules
	 * by looking whether they carry a CHAIN_ID.
	 *
	 * We also want the second half to have a lower rule_index than its
	 * first half because the HW search is by incrementing addresses.
	 */
	if (fs->location == RX_CLS_LOC_ANY)
		rule_index[1] = find_first_zero_bit(priv->cfp.used,
						    priv->num_cfp_rules);
	else
		rule_index[1] = fs->location;
	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
		return -ENOSPC;

	/* Flag it as used (cleared on error path) such that we can immediately
	 * obtain a second one to chain from.
	 */
	set_bit(rule_index[1], priv->cfp.used);

	rule_index[0] = find_first_zero_bit(priv->cfp.used,
					    priv->num_cfp_rules);
	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
		ret = -ENOSPC;
		goto out_err;
	}

	input.fs = fs;
	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow)) {
		ret = PTR_ERR(flow);
		goto out_err;
	}
	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
	flow_rule_match_ports(flow->rule, &ports);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Apply to all packets received through this port */
	core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));

	/* Source port map match */
	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));

	/* S-Tag status		[31:30]
	 * C-Tag status		[29:28]
	 * L2 framing		[27:26]
	 * L3 framing		[25:24]
	 * IP ToS		[23:16]
	 * IP proto		[15:08]
	 * IP Fragm		[7]
	 * Non 1st frag		[6]
	 * IP Authen		[5]
	 * TTL range		[4:3]
	 * PPPoE session	[2]
	 * Reserved		[1]
	 * UDF_Valid[8]		[0]
	 */
	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
	      ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask with the specific layout for IPv6 packets including
	 * UDF_Valid[8]
	 */
	reg = layout->udfs[slice_num].mask_value | udf_upper_bits(num_udf);
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* UDF_Valid[7:0]	[31:24]
	 * S-Tag		[23:8]
	 * C-Tag		[7:0]
	 */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_DATA_PORT(5));

	/* Mask all but valid UDFs */
	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));

	/* Slice the IPv6 source address and port */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
			       ports.key->src, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
			       ports.mask->src, SLICE_NUM_MASK, true);

	/* Insert into TCAM now because we need to insert a second rule */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port, port_num,
				      queue_num, false);
	if (ret)
		goto out_err_flow_rule;

	/* Now deal with the second slice to chain this rule */
	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
	if (slice_num == UDF_NUM_SLICES) {
		ret = -EINVAL;
		goto out_err_flow_rule;
	}

	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);

	/* Apply the UDF layout for this filter */
	bcm_sf2_cfp_udf_set(priv, layout, slice_num);

	/* Chained rule, source port match is coming from the rule we are
	 * chained from.
	 */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(7));
	core_writel(priv, 0, CORE_CFP_MASK_PORT(7));

	/*
	 * CHAIN ID		[31:24] chain to previous slice
	 * Reserved		[23:20]
	 * UDF_Valid[11:8]	[19:16]
	 * UDF_Valid[7:0]	[15:8]
	 * UDF_n_D11		[7:0]
	 */
	reg = rule_index[0] << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));

	/* Mask all except chain ID, UDF Valid[8] and UDF Valid[7:0] */
	reg = XCESS_ADDR_MASK << 24 | udf_upper_bits(num_udf) << 16 |
	      udf_lower_bits(num_udf) << 8;
	core_writel(priv, reg, CORE_CFP_MASK_PORT(6));

	/* Don't care */
	core_writel(priv, 0, CORE_CFP_DATA_PORT(5));

	/* Mask all */
	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
	/* Slice the IPv6 destination address and port; the mask write must
	 * use the mask values, matching the source half above.
	 */
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
			       ports.key->dst, slice_num, false);
	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
			       ports.mask->dst, SLICE_NUM_MASK, true);
	/* Insert into TCAM now */
	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret) {
		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
		goto out_err_flow_rule;
	}

	/* Insert into Action and policer RAMs now, set chain ID to
	 * the one we are chained to
	 */
	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port, port_num,
				      queue_num, true);
	if (ret)
		goto out_err_flow_rule;

	/* Turn on CFP for this rule now */
	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg |= BIT(port);
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	/* Flag the second half rule as being used now, return it as the
	 * location, and flag it as unique while dumping rules
	 */
	set_bit(rule_index[0], priv->cfp.used);
	set_bit(rule_index[1], priv->cfp.unique);
	fs->location = rule_index[1];

	return ret;

out_err_flow_rule:
	ethtool_rx_flow_rule_destroy(flow);
out_err:
	clear_bit(rule_index[1], priv->cfp.used);
	return ret;
}
static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
				   struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	s8 cpu_port = ds->ports[port].cpu_dp->index;
	__u64 ring_cookie = fs->ring_cookie;
	unsigned int queue_num, port_num;
	int ret;

	/* This rule is a Wake-on-LAN filter and we must specifically
	 * target the CPU port in order for it to be working.
	 */
	if (ring_cookie == RX_CLS_FLOW_WAKE)
		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;

	/* We do not support discarding packets, check that the
	 * destination port is enabled and that we are within the
	 * number of ports supported by the switch
	 */
	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;

	if (ring_cookie == RX_CLS_FLOW_DISC ||
	    !(dsa_is_user_port(ds, port_num) ||
	      dsa_is_cpu_port(ds, port_num)) ||
	    port_num >= priv->hw_params.num_ports)
		return -EINVAL;
	/*
	 * We have a small oddity where Port 6 just does not have a
	 * valid bit here (so we subtract by one).
	 */
	queue_num = ring_cookie % SF2_NUM_EGRESS_QUEUES;
	if (port_num >= 7)
		port_num -= 1;
	switch (fs->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
						queue_num, fs);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
				struct ethtool_rx_flow_spec *fs)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule = NULL;
	int ret = -EINVAL;

	/* Check for unsupported extensions */
	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
	    fs->m_ext.data[1]))
		return -EINVAL;

	if (fs->location != RX_CLS_LOC_ANY &&
	    test_bit(fs->location, priv->cfp.used))
		return -EBUSY;

	if (fs->location != RX_CLS_LOC_ANY &&
	    fs->location > bcm_sf2_cfp_rule_size(priv))
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
	if (ret == 0)
		return -EEXIST;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
	if (ret) {
		kfree(rule);
		return ret;
	}

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, &priv->cfp.rules_list);

	return ret;
}
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
				    u32 loc, u32 *next_loc)
{
	int ret;
	u32 reg;

	/* Indicate which rule we want to read */
	bcm_sf2_cfp_rule_addr_set(priv, loc);

	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
	if (ret)
		return ret;

	/* Check if this is possibly an IPv6 rule that would
	 * indicate we need to delete its companion rule
	 * as well
	 */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
	if (next_loc)
		*next_loc = (reg >> 24) & CHAIN_ID_MASK;

	/* Clear its valid bits */
	reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
	reg &= ~SLICE_VALID;
	core_writel(priv, reg, CORE_CFP_DATA_PORT(0));

	/* Write back this entry into the TCAM now */
	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
	if (ret)
		return ret;

	clear_bit(loc, priv->cfp.used);
	clear_bit(loc, priv->cfp.unique);

	return 0;
}
static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
				   u32 loc)
{
	u32 next_loc = 0;
	int ret;

	ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
	if (ret)
		return ret;

	/* If this was an IPv6 rule, delete its companion rule too */
	if (next_loc)
		ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);

	return ret;
}
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
	struct cfp_rule *rule;
	int ret;

	/* Refuse deleting unused rules, and those that are not unique since
	 * that could leave an IPv6 rule with only one of its two chained
	 * entries in the table.
	 */
	if (!test_bit(loc, priv->cfp.unique) || loc == 0)
		return -EINVAL;

	rule = bcm_sf2_cfp_rule_find(priv, port, loc);
	if (!rule)
		return -EINVAL;

	ret = bcm_sf2_cfp_rule_remove(priv, port, loc);

	list_del(&rule->next);
	kfree(rule);

	return ret;
}
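/* XOR every mask byte and extension word with all-ones, flipping the
 * stored mask sense before a rule is handed back to user-space.
 */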
static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
	unsigned int i;

	for (i = 0; i < sizeof(flow->m_u); i++)
		flow->m_u.hdata[i] ^= 0xff;

	flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
	flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
	flow->m_ext.data[0] ^= cpu_to_be32(~0);
	flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	bcm_sf2_invert_masks(&nfc->fs);

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);

	return 0;
}
/* The rule dump walks the bitmap of unique, user-visible rules */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
				    int port, struct ethtool_rxnfc *nfc,
				    u32 *rule_locs)
{
	unsigned int index = 1, rules_cnt = 0;

	for_each_set_bit_from(index, priv->cfp.unique, priv->num_cfp_rules) {
		rule_locs[rules_cnt] = index;
		rules_cnt++;
	}

	/* Put the TCAM size here */
	nfc->data = bcm_sf2_cfp_rule_size(priv);
	nfc->rule_cnt = rules_cnt;

	return 0;
}
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		/* Subtract the default, unusable rule */
		nfc->rule_cnt = bitmap_weight(priv->cfp.unique,
					      priv->num_cfp_rules) - 1;
		/* We support specifying rule locations */
		nfc->data |= RX_CLS_LOC_SPECIAL;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = bcm_sf2_cfp_rule_get(priv, port, nfc);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device */
	if (p->ethtool_ops->get_rxnfc) {
		ret = p->ethtool_ops->get_rxnfc(p, nfc, rule_locs);
		if (ret == -EOPNOTSUPP)
			ret = 0;
	}

	return ret;
}
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
		      struct ethtool_rxnfc *nfc)
{
	struct net_device *p = ds->ports[port].cpu_dp->master;
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	int ret = 0;

	mutex_lock(&priv->cfp.lock);

	switch (nfc->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
		break;

	case ETHTOOL_SRXCLSRLDEL:
		ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	mutex_unlock(&priv->cfp.lock);

	if (ret)
		return ret;

	/* Pass up the commands to the attached master network device.
	 * This can fail, so rollback the operation if we need to.
	 */
	if (p->ethtool_ops->set_rxnfc) {
		ret = p->ethtool_ops->set_rxnfc(p, nfc);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_lock(&priv->cfp.lock);
			bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
			mutex_unlock(&priv->cfp.lock);
		} else {
			ret = 0;
		}
	}

	return ret;
}
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
{
	unsigned int timeout = 1000;
	u32 reg;

	reg = core_readl(priv, CORE_CFP_ACC);
	reg |= TCAM_RESET;
	core_writel(priv, reg, CORE_CFP_ACC);

	do {
		reg = core_readl(priv, CORE_CFP_ACC);
		if (!(reg & TCAM_RESET))
			break;

		cpu_relax();
	} while (timeout--);

	if (!timeout)
		return -ETIMEDOUT;

	return 0;
}
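/* Delete every software rule, and with it the backing TCAM entries,
 * walking the list in reverse insertion order.
 */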
void bcm_sf2_cfp_exit(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule, *n;

	if (list_empty(&priv->cfp.rules_list))
		return;

	list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
		bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
}
int bcm_sf2_cfp_resume(struct dsa_switch *ds)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	struct cfp_rule *rule;
	int ret = 0;
	u32 reg;

	if (list_empty(&priv->cfp.rules_list))
		return ret;

	reg = core_readl(priv, CORE_CFP_CTL_REG);
	reg &= ~CFP_EN_MAP_MASK;
	core_writel(priv, reg, CORE_CFP_CTL_REG);

	ret = bcm_sf2_cfp_rst(priv);
	if (ret)
		return ret;

	list_for_each_entry(rule, &priv->cfp.rules_list, next) {
		ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
					      rule->fs.location);
		if (ret) {
			dev_err(ds->dev, "failed to remove rule\n");
			return ret;
		}

		ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
		if (ret) {
			dev_err(ds->dev, "failed to restore rule\n");
			return ret;
		}
	}

	return ret;
}
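/* Each CFP rule owns a green/yellow/red policer counter triplet in
 * the stats RAM.
 */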
static const struct bcm_sf2_cfp_stat {
	unsigned int offset;
	unsigned int ram_loc;
	const char *name;
} bcm_sf2_cfp_stats[] = {
	{
		.offset = CORE_STAT_GREEN_CNTR,
		.ram_loc = GREEN_STAT_RAM,
		.name = "Green"
	},
	{
		.offset = CORE_STAT_YELLOW_CNTR,
		.ram_loc = YELLOW_STAT_RAM,
		.name = "Yellow"
	},
	{
		.offset = CORE_STAT_RED_CNTR,
		.ram_loc = RED_STAT_RAM,
		.name = "Red"
	},
};
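/* Emit one "CFP%03d_<color>Cntr" string per counter for every usable
 * rule; entry #0 is reserved and therefore skipped.
 */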
void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
			     u32 stringset, uint8_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	char buf[ETH_GSTRING_LEN];
	unsigned int i, j, iter;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 1; i < priv->num_cfp_rules; i++) {
		for (j = 0; j < s; j++) {
			snprintf(buf, sizeof(buf),
				 "CFP%03d_%sCntr",
				 i, bcm_sf2_cfp_stats[j].name);
			iter = (i - 1) * s + j;
			strlcpy(data + iter * ETH_GSTRING_LEN,
				buf, ETH_GSTRING_LEN);
		}
	}
}
void bcm_sf2_cfp_get_ethtool_stats(struct dsa_switch *ds, int port,
				   uint64_t *data)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
	unsigned int s = ARRAY_SIZE(bcm_sf2_cfp_stats);
	const struct bcm_sf2_cfp_stat *stat;
	unsigned int i, j, iter;
	struct cfp_rule *rule;
	int ret;

	mutex_lock(&priv->cfp.lock);
	for (i = 1; i < priv->num_cfp_rules; i++) {
		rule = bcm_sf2_cfp_rule_find(priv, port, i);
		if (!rule)
			continue;

		for (j = 0; j < s; j++) {
			stat = &bcm_sf2_cfp_stats[j];

			bcm_sf2_cfp_rule_addr_set(priv, i);
			ret = bcm_sf2_cfp_op(priv, stat->ram_loc | OP_SEL_READ);
			if (ret)
				continue;

			iter = (i - 1) * s + j;
			data[iter] = core_readl(priv, stat->offset);
		}
	}
	mutex_unlock(&priv->cfp.lock);
}
int bcm_sf2_cfp_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);

	if (sset != ETH_SS_STATS)
		return 0;

	/* 3 counters per CFP rule */
	return (priv->num_cfp_rules - 1) * ARRAY_SIZE(bcm_sf2_cfp_stats);
}