drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);

	return 0;
}
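
/* All tcam/sram accesses go through the indirect index/data register
 * pair above: callers typically build an entry in software with the
 * tcam_*()/sram_*() helpers below and then commit it in one shot with
 * mvpp2_prs_hw_write().
 */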
/* Initialize tcam entry from hw */
int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe,
			   int tid)
{
	int i;

	if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	memset(pe, 0, sizeof(*pe));
	pe->index = tid;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}
/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}
/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU(lu & MVPP2_PRS_LU_MASK);
	pe->tcam[MVPP2_PRS_TCAM_LU_WORD] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK);
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	if (add)
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port));
	else
		pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port));
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK);
	pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] |= MVPP2_PRS_TCAM_PORT_EN(~ports & MVPP2_PRS_PORT_MASK);
}

/* Obtain port map from tcam sw entry */
unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	return (~pe->tcam[MVPP2_PRS_TCAM_PORT_WORD] >> 24) & MVPP2_PRS_PORT_MASK;
}
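
/* The hardware stores the port map inverted: a set enable bit *masks*
 * that port out. For example, setting a map of 0x3 (ports 0 and 1)
 * programs ~0x3 into the enable bits, and the getter above re-inverts
 * on read. An all-zero tcam word therefore matches every port.
 */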
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(0xff << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos);
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= byte << pos;
	pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] |= MVPP2_PRS_TCAM_EN(enable << pos);
}

/* Get byte of data and its enable bits from tcam sw entry */
void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
				  unsigned int offs, unsigned char *byte,
				  unsigned char *enable)
{
	int pos = MVPP2_PRS_BYTE_IN_WORD(offs) * BITS_PER_BYTE;

	*byte = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> pos) & 0xff;
	*enable = (pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] >> (pos + 16)) & 0xff;
}
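
/* Each tcam word holds two data bytes in its low half and their enable
 * (mask) bytes 16 bits higher, which is why the enable byte is read
 * from pos + 16 above. E.g. offs = 5 lands in word
 * MVPP2_PRS_BYTE_TO_WORD(5) at bit position 8, with its enable bits at
 * bit position 24 of the same word.
 */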
/* Compare tcam data bytes with a pattern */
static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
				    u16 data)
{
	u16 tcam_data;

	tcam_data = pe->tcam[MVPP2_PRS_BYTE_TO_WORD(offs)] & 0xffff;
	return tcam_data == data;
}
/* Update ai bits in tcam sw entry */
static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int enable)
{
	int i;

	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
		if (!(enable & BIT(i)))
			continue;

		if (bits & BIT(i))
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= BIT(i);
		else
			pe->tcam[MVPP2_PRS_TCAM_AI_WORD] &= ~BIT(i);
	}

	pe->tcam[MVPP2_PRS_TCAM_AI_WORD] |= MVPP2_PRS_TCAM_AI_EN(enable);
}

/* Get ai bits from tcam sw entry */
static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
{
	return pe->tcam[MVPP2_PRS_TCAM_AI_WORD] & MVPP2_PRS_AI_MASK;
}
/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set vid in tcam sw entry */
static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
				unsigned short vid)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
}
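
/* A 12-bit VID is split across two tcam bytes: the high nibble goes
 * into the first byte (enable mask 0xf, so the upper four bits of that
 * byte stay don't-care) and the low eight bits into the second byte.
 * E.g. vid 0x123 is matched as bytes 0x01/0x23.
 */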
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] |= (val << (MVPP2_BIT_IN_WORD(bit_num)));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      u32 val)
{
	pe->sram[MVPP2_BIT_TO_WORD(bit_num)] &= ~(val << (MVPP2_BIT_IN_WORD(bit_num)));
}
/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_RI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Obtain ri bits from sram sw entry */
static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
{
	return pe->sram[MVPP2_PRS_SRAM_RI_WORD];
}
/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_OFFS + i,
						1);
		else
			mvpp2_prs_sram_bits_clear(pe,
						  MVPP2_PRS_SRAM_AI_OFFS + i,
						  1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}
/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	/* ai is stored on bits 90->97; so it spreads across two u32 */
	int ai_off = MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_shift = MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS);

	bits = (pe->sram[ai_off] >> ai_shift) |
	       (pe->sram[ai_off + 1] << (32 - ai_shift));

	return bits;
}
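
/* Worked example: with the ai field at sram bit 90, ai_off = 90 / 32 = 2
 * and ai_shift = 90 % 32 = 26, so the low six ai bits come from
 * sram[2] >> 26 and the top two from sram[3] << 6; the u8 result keeps
 * exactly the eight ai bits. (Unlike ri, which occupies a full sram
 * word and is read directly above.)
 */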
/* In sram sw entry set lookup ID field of the tcam key to be used in the
 * next lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}
/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
		shift & MVPP2_PRS_SRAM_SHIFT_MASK;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
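
/* The shift is encoded as sign-and-magnitude: a negative shift sets the
 * sign bit and stores the absolute value, e.g. shift = -18 (used for the
 * "shift back to IPv6 NH" entries below) becomes sign = 1, magnitude = 18.
 */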
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				offset & MVPP2_PRS_SRAM_UDF_MASK);

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				op & MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* Find parser flow entry */
static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		bits = mvpp2_prs_sram_ai_get(&pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return tid;
	}

	return -ENOENT;
}
/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}
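
/* The endpoints may be passed in either order (callers below use both
 * (FIRST, LAST) and (LAST, FIRST)); after the swap the scan always walks
 * the shadow table upward and returns the lowest free index in range.
 */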
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to unicast or multicast promiscuous mode */
void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port,
			       enum mvpp2_prs_l2_cast l2_cast, bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char cast_match;
	unsigned int ri;
	int tid;

	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	/* promiscuous mode - Accept unknown unicast or multicast packets */
	if (priv->prs_shadow[tid].valid) {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);

		/* Match UC or MC addresses */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
					     MVPP2_PRS_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa packets */
static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
				  bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);

			/* Set ai bits for next iteration */
			if (extend)
				mvpp2_prs_sram_ai_update(&pe, 1,
							 MVPP2_PRS_SRAM_AI_MASK);
			else
				mvpp2_prs_sram_ai_update(&pe, 0,
							 MVPP2_PRS_SRAM_AI_MASK);

			/* Set result info bits to 'single vlan' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
			/* If packet is tagged continue check vid filtering */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
		} else {
			/* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
			mvpp2_prs_sram_shift_set(&pe, shift,
						 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set entry for dsa ethertype */
static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
					    bool add, bool tagged, bool extend)
{
	struct mvpp2_prs_entry pe;
	int tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
		      MVPP2_PE_ETYPE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;
	} else {
		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
		      MVPP2_PE_ETYPE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;
	}

	if (priv->prs_shadow[tid].valid) {
		/* Entry exists - update port only */
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;

		/* Set ethertype */
		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
		mvpp2_prs_match_etype(&pe, 2, 0);

		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
					 MVPP2_PRS_RI_DSA_MASK);
		/* Shift ethertype + 2 byte reserved + tag */
		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);

		if (tagged) {
			/* Set tagged bit in DSA tag */
			mvpp2_prs_tcam_data_byte_set(&pe,
						     MVPP2_ETH_TYPE_LEN + 2 + 3,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
						     MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			/* Clear all ai bits for next iteration */
			mvpp2_prs_sram_ai_update(&pe, 0,
						 MVPP2_PRS_SRAM_AI_MASK);
			/* If packet is tagged continue check vlans */
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Set result info bits to 'no vlans' */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
						 MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		/* Mask/unmask all ports, depending on dsa type */
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Search for existing single/triple vlan entry */
static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_bits, ai_bits;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);
		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid);
		if (!match)
			continue;

		/* Get vlan type */
		ri_bits = mvpp2_prs_sram_ri_get(&pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;

		/* Get current ai value from tcam */
		ai_bits = mvpp2_prs_tcam_ai_get(&pe);
		/* Clear double vlan bit */
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;

		if (ai != ai_bits)
			continue;

		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return tid;
	}

	return -ENOENT;
}
/* Add/update single/triple vlan entry */
static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
			      unsigned int port_map)
{
	struct mvpp2_prs_entry pe;
	int tid_aux, tid;
	int ret = 0;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_vlan_find(priv, tpid, ai);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}
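
		/* Single/triple vlan entries must live at higher tids than
		 * every double vlan entry, so bail out if the free slot
		 * found does not sit past the last double entry.
		 */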
		if (tid <= tid_aux)
			return -EINVAL;

		memset(&pe, 0, sizeof(pe));
		pe.index = tid;
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);

		mvpp2_prs_match_etype(&pe, 0, tpid);

		/* VLAN tag detected, proceed with VID filtering */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

		/* Clear all ai bits for next iteration */
		mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_SINGLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		} else {
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_TRIPLE,
						 MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(&pe, ai, MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}
	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);

	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
/* Get first free double vlan ai number */
static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
{
	int i;

	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
		if (!priv->prs_double_vlans[i])
			return i;
	}

	return -EINVAL;
}
/* Search for existing double vlan entry */
static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1,
				      unsigned short tpid2)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Go through all entries with MVPP2_PRS_LU_VLAN */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int ri_mask;
		bool match;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_init_from_hw(priv, &pe, tid);

		match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) &&
			mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2);

		if (!match)
			continue;

		ri_mask = mvpp2_prs_sram_ri_get(&pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return tid;
	}

	return -ENOENT;
}
/* Add or update double vlan entry */
static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
				     unsigned short tpid2,
				     unsigned int port_map)
{
	int tid_aux, tid, ai, ret = 0;
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);

	if (tid < 0) {
		/* Create new tcam entry */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		/* Set ai value for new double vlan entry */
		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
		if (ai < 0)
			return ai;

		/* Get first single/triple vlan tid */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			unsigned int ri_bits;

			if (!priv->prs_shadow[tid_aux].valid ||
			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;

			mvpp2_prs_init_from_hw(priv, &pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(&pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}
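
		/* Conversely to the single/triple case above, double vlan
		 * entries must sit below every single/triple entry, so bail
		 * out if the free slot is not before the first one.
		 */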
		if (tid >= tid_aux)
			return -ERANGE;

		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		pe.index = tid;

		priv->prs_double_vlans[ai] = true;

		mvpp2_prs_match_etype(&pe, 0, tpid1);
		mvpp2_prs_match_etype(&pe, 4, tpid2);

		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		/* Shift 4 bytes - skip outer vlan tag */
		mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
					 MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(&pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
					 MVPP2_PRS_SRAM_AI_MASK);

		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	} else {
		mvpp2_prs_init_from_hw(priv, &pe, tid);
	}

	/* Update ports' mask */
	mvpp2_prs_tcam_port_map_set(&pe, port_map);
	mvpp2_prs_hw_write(priv, &pe);

	return ret;
}
/* IPv4 header parsing for fragmentation and L4 offset */
static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return -EINVAL;

	/* Not fragmented packet */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
				     MVPP2_PRS_TCAM_PROTO_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Fragmented packet */
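	/* The IP4 lookup starts 4 bytes into the IP header (see the
	 * "Skip eth_type + 4 bytes" shift in mvpp2_prs_etype_init()), so
	 * data bytes 2-3 above cover the flags/fragment-offset field:
	 * the first entry requires it to be zero, while this second entry
	 * reuses the layout, unmasks those bytes and reports IP_FRAG_TRUE.
	 */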
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);

	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv4 L3 multicast or broadcast */
static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
					     MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case MVPP2_PRS_L3_BROAD_CAST:
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
					 MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for protocols over IPv6 */
static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
			       unsigned int ri, unsigned int ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct ipv6hdr) - 6,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Write HW */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* IPv6 L3 multicast entry */
static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return -EINVAL;

	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;

	/* Continue - set next lookup to IPv6 again */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to IPv6 NH */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
				     MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}
/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* Create dummy entries for drop all and promiscuous modes */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
	mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
}
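
/* The dummy entries above are created with add == false, i.e. with an
 * all-masked port map, so they match nothing until a port is later
 * enabled in them through the same mvpp2_prs_mac_*_set() helpers.
 */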
/* Set default entries for various types of dsa packets */
static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	/* None tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_EDSA);

	/* Tagged EDSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
			      MVPP2_PRS_DSA);

	/* Tagged DSA entry - place holder */
	mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* None tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);

	/* Tagged EDSA ethertype entry - place holder */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);

	/* None tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);

	/* Tagged DSA ethertype entry */
	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);

	/* Set default entry, in case DSA or EDSA tag not found */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);

	/* Shift 0 bytes */
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

	/* Clear all sram ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Initialize parser entries for VID filtering */
static void mvpp2_prs_vid_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 4 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vid entry for extended DSA */
	memset(&pe, 0, sizeof(pe));

	/* Set default vid entry */
	pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
				 MVPP2_PRS_EDSA_VID_AI_BIT);

	/* Skip VLAN header - Set offset to 8 bytes */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Clear all ai bits for next iteration */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
	mvpp2_prs_hw_write(priv, &pe);
}
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Configure vlan entries and detect up to 2 successive VLAN tags.
 * Possible options:
 * 0x8100, 0x88A8
 * 0x8100, 0x8100
 * 0x8100
 * 0x88A8
 */
static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool),
					      MVPP2_PRS_DBL_VLANS_MAX,
					      GFP_KERNEL);
	if (!priv->prs_double_vlans)
		return -ENOMEM;

	/* Double VLAN: 0x8100, 0x88A8 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Double VLAN: 0x8100, 0x8100 */
	err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
					MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x88a8 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Single VLAN: 0x8100 */
	err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
				 MVPP2_PRS_PORT_MASK);
	if (err)
		return err;

	/* Set default double vlan entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);

	/* Clear ai for next iterations */
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
				 MVPP2_PRS_RI_VLAN_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
				 MVPP2_PRS_DBL_VLAN_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	/* Set default vlan none entry */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
				 MVPP2_PRS_RI_VLAN_MASK);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Set entries for PPPoE ethertype */
static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* IPv4 over PPPoE with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IP);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv4 over PPPoE without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	/* Clear ri before updating */
	pe.sram[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* IPv6 over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IPv6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	/* Non-IP over PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	/* Set L3 offset even if it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
/* Initialize entries for IPv4 */
static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int err;

	/* Set entries for TCP, UDP and IGMP over IPv4 */
	err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
				  MVPP2_PRS_RI_L4_PROTO_MASK);
	if (err)
		return err;

	err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				  MVPP2_PRS_RI_CPU_CODE_MASK |
				  MVPP2_PRS_RI_UDF3_MASK);
	if (err)
		return err;

	/* IPv4 Broadcast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
	if (err)
		return err;

	/* IPv4 Multicast */
	err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
	if (err)
		return err;

	/* Default IPv4 entry for unknown protocols */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;

	/* Set next lu to IPv4 */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L4 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
				  sizeof(struct iphdr) - 4,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
				 MVPP2_PRS_RI_L4_PROTO_MASK);

	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default IPv4 entry for unicast address */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;

	/* Finished: go to flowid generation */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
				 MVPP2_PRS_RI_L3_ADDR_MASK);

	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
				 MVPP2_PRS_IPV4_DIP_AI_BIT);
	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
1773 /* Initialize entries for IPv6 */
1774 static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
1776 struct mvpp2_prs_entry pe;
1777 int tid, err;
1779 /* Set entries for TCP, UDP and ICMP over IPv6 */
1780 err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
1781 MVPP2_PRS_RI_L4_TCP,
1782 MVPP2_PRS_RI_L4_PROTO_MASK);
1783 if (err)
1784 return err;
1786 err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
1787 MVPP2_PRS_RI_L4_UDP,
1788 MVPP2_PRS_RI_L4_PROTO_MASK);
1789 if (err)
1790 return err;
1792 err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
1793 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
1794 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
1795 MVPP2_PRS_RI_CPU_CODE_MASK |
1796 MVPP2_PRS_RI_UDF3_MASK);
1797 if (err)
1798 return err;
1800 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
1801 /* Result Info: UDF7=1, DS lite */
1802 err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
1803 MVPP2_PRS_RI_UDF7_IP6_LITE,
1804 MVPP2_PRS_RI_UDF7_MASK);
1805 if (err)
1806 return err;
1808 /* IPv6 multicast */
1809 err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
1810 if (err)
1811 return err;
1813 /* Entry for checking hop limit */
1814 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1815 MVPP2_PE_LAST_FREE_TID);
1816 if (tid < 0)
1817 return tid;
1819 memset(&pe, 0, sizeof(pe));
1820 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1821 pe.index = tid;
1823 /* Finished: go to flowid generation */
1824 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1825 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1826 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
1827 MVPP2_PRS_RI_DROP_MASK,
1828 MVPP2_PRS_RI_L3_PROTO_MASK |
1829 MVPP2_PRS_RI_DROP_MASK);
1831 mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
1832 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1833 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1835 /* Update shadow table and hw entry */
1836 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1837 mvpp2_prs_hw_write(priv, &pe);
1839 /* Default IPv6 entry for unknown protocols */
1840 memset(&pe, 0, sizeof(pe));
1841 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1842 pe.index = MVPP2_PE_IP6_PROTO_UN;
1844 /* Finished: go to flowid generation */
1845 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1846 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1847 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1848 MVPP2_PRS_RI_L4_PROTO_MASK);
1849 /* Set L4 offset relatively to our current place */
1850 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
1851 sizeof(struct ipv6hdr) - 4,
1852 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
1854 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1855 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1856 /* Unmask all ports */
1857 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1859 /* Update shadow table and hw entry */
1860 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1861 mvpp2_prs_hw_write(priv, &pe);
1863 /* Default IPv6 entry for unknown ext protocols */
1864 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1865 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1866 pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
1868 /* Finished: go to flowid generation */
1869 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1870 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1871 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
1872 MVPP2_PRS_RI_L4_PROTO_MASK);
1874 mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
1875 MVPP2_PRS_IPV6_EXT_AI_BIT);
1876 /* Unmask all ports */
1877 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1879 /* Update shadow table and hw entry */
1880 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
1881 mvpp2_prs_hw_write(priv, &pe);
1883 /* Default IPv6 entry for unicast address */
1884 memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1885 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
1886 pe.index = MVPP2_PE_IP6_ADDR_UN;
1888 /* Finished: go to IPv6 again */
1889 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
1890 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
1891 MVPP2_PRS_RI_L3_ADDR_MASK);
1892 mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
1893 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1894 /* Shift back to IPV6 NH */
1895 mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1897 mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
1898 /* Unmask all ports */
1899 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1901 /* Update shadow table and hw entry */
1902 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
1903 mvpp2_prs_hw_write(priv, &pe);
1905 return 0;
1906 }
1908 /* Find tcam entry with matched pair <vid,port> */
1909 static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
1910 {
1911 unsigned char byte[2], enable[2];
1912 struct mvpp2_prs_entry pe;
1913 u16 rvid, rmask;
1914 int tid;
1916 /* Go through all entries with MVPP2_PRS_LU_VID */
1917 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
1918 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
1919 if (!port->priv->prs_shadow[tid].valid ||
1920 port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
1921 continue;
1923 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
1925 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
1926 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
1928 rvid = ((byte[0] & 0xf) << 8) + byte[1];
1929 rmask = ((enable[0] & 0xf) << 8) + enable[1];
1931 if (rvid != vid || rmask != mask)
1932 continue;
1934 return tid;
1935 }
1937 return -ENOENT;
1938 }
1940 /* Write parser entry for VID filtering */
1941 int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
1942 {
1943 unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
1944 port->id * MVPP2_PRS_VLAN_FILT_MAX;
1945 unsigned int mask = 0xfff, reg_val, shift;
1946 struct mvpp2 *priv = port->priv;
1947 struct mvpp2_prs_entry pe;
1948 int tid;
1950 memset(&pe, 0, sizeof(pe));
1952 /* Scan TCAM and see if entry with this <vid,port> already exists */
1953 tid = mvpp2_prs_vid_range_find(port, vid, mask);
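1954 /* The skip length depends on whether the port uses basic or extended DSA tags */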
1955 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
1956 if (reg_val & MVPP2_DSA_EXTENDED)
1957 shift = MVPP2_VLAN_TAG_EDSA_LEN;
1958 else
1959 shift = MVPP2_VLAN_TAG_LEN;
1961 /* No such entry */
1962 if (tid < 0) {
1964 /* Go through all entries from first to last in vlan range */
1965 tid = mvpp2_prs_tcam_first_free(priv, vid_start,
1966 vid_start +
1967 MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
1969 /* There isn't room for a new VID filter */
1970 if (tid < 0)
1971 return tid;
1973 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
1974 pe.index = tid;
1976 /* Mask all ports */
1977 mvpp2_prs_tcam_port_map_set(&pe, 0);
1978 } else {
1979 mvpp2_prs_init_from_hw(priv, &pe, tid);
1980 }
1982 /* Enable the current port */
1983 mvpp2_prs_tcam_port_set(&pe, port->id, true);
1985 /* Continue - set next lookup */
1986 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
1988 /* Skip VLAN header - Set offset to 4 or 8 bytes */
1989 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1991 /* Set match on VID */
1992 mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
1994 /* Clear all ai bits for next iteration */
1995 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
1997 /* Update shadow table */
1998 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
1999 mvpp2_prs_hw_write(priv, &pe);
2001 return 0;
2002 }
2004 /* Remove parser entry for VID filtering */
2005 void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
2006 {
2007 struct mvpp2 *priv = port->priv;
2008 int tid;
2010 /* Scan TCAM and see if entry with this <vid,port> already exists */
2011 tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
2013 /* No such entry */
2014 if (tid < 0)
2015 return;
2017 mvpp2_prs_hw_inv(priv, tid);
2018 priv->prs_shadow[tid].valid = false;
2019 }
2021 /* Remove all existing VID filters on this port */
2022 void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
2023 {
2024 struct mvpp2 *priv = port->priv;
2025 int tid;
2027 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
2028 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
2029 if (priv->prs_shadow[tid].valid) {
2030 mvpp2_prs_hw_inv(priv, tid);
2031 priv->prs_shadow[tid].valid = false;
2032 }
2033 }
2034 }
2036 /* Remove VID filtering entry for this port */
2037 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
2038 {
2039 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2040 struct mvpp2 *priv = port->priv;
2042 /* Invalidate the guard entry */
2043 mvpp2_prs_hw_inv(priv, tid);
2045 priv->prs_shadow[tid].valid = false;
2046 }
2048 /* Add guard entry that drops packets when no VID is matched on this port */
2049 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
2050 {
2051 unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
2052 struct mvpp2 *priv = port->priv;
2053 unsigned int reg_val, shift;
2054 struct mvpp2_prs_entry pe;
2056 if (priv->prs_shadow[tid].valid)
2057 return;
2059 memset(&pe, 0, sizeof(pe));
2061 pe.index = tid;
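2062 /* Pick the tag length to skip based on the port's DSA mode */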
2063 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
2064 if (reg_val & MVPP2_DSA_EXTENDED)
2065 shift = MVPP2_VLAN_TAG_EDSA_LEN;
2066 else
2067 shift = MVPP2_VLAN_TAG_LEN;
2069 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
2071 /* Mask all ports */
2072 mvpp2_prs_tcam_port_map_set(&pe, 0);
2074 /* Update port mask */
2075 mvpp2_prs_tcam_port_set(&pe, port->id, true);
2077 /* Continue - set next lookup */
2078 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
2080 /* Skip VLAN header - Set offset to 4 or 8 bytes */
2081 mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2083 /* Drop VLAN packets that don't belong to any VIDs on this port */
2084 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
2085 MVPP2_PRS_RI_DROP_MASK);
2087 /* Clear all ai bits for next iteration */
2088 mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
2090 /* Update shadow table */
2091 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
2092 mvpp2_prs_hw_write(priv, &pe);
2093 }
2095 /* Parser default initialization */
2096 int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv)
2097 {
2098 int err, index, i;
2100 /* Enable tcam table */
2101 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2103 /* Clear all tcam and sram entries */
2104 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2105 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2106 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2107 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2109 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2110 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2111 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2112 }
2114 /* Invalidate all tcam entries */
2115 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2116 mvpp2_prs_hw_inv(priv, index);
2118 priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2119 sizeof(*priv->prs_shadow),
2120 GFP_KERNEL);
2121 if (!priv->prs_shadow)
2122 return -ENOMEM;
2124 /* Always start from lookup = 0 */
2125 for (index = 0; index < MVPP2_MAX_PORTS; index++)
2126 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2127 MVPP2_PRS_PORT_LU_MAX, 0);
2129 mvpp2_prs_def_flow_init(priv);
2131 mvpp2_prs_mh_init(priv);
2133 mvpp2_prs_mac_init(priv);
2135 mvpp2_prs_dsa_init(priv);
2137 mvpp2_prs_vid_init(priv);
2139 err = mvpp2_prs_etype_init(priv);
2140 if (err)
2141 return err;
2143 err = mvpp2_prs_vlan_init(pdev, priv);
2144 if (err)
2145 return err;
2147 err = mvpp2_prs_pppoe_init(priv);
2148 if (err)
2149 return err;
2151 err = mvpp2_prs_ip6_init(priv);
2152 if (err)
2153 return err;
2155 err = mvpp2_prs_ip4_init(priv);
2156 if (err)
2157 return err;
2159 return 0;
2160 }
2162 /* Compare MAC DA with tcam entry data */
2163 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2164 const u8 *da, unsigned char *mask)
2165 {
2166 unsigned char tcam_byte, tcam_mask;
2167 int index;
2169 for (index = 0; index < ETH_ALEN; index++) {
2170 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2171 if (tcam_mask != mask[index])
2172 return false;
2174 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2175 return false;
2176 }
2178 return true;
2179 }
2181 /* Find tcam entry with matched pair <MAC DA, port> */
2182 static int
2183 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2184 unsigned char *mask, int udf_type)
2185 {
2186 struct mvpp2_prs_entry pe;
2187 int tid;
2189 /* Go through all entries with MVPP2_PRS_LU_MAC */
2190 for (tid = MVPP2_PE_MAC_RANGE_START;
2191 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2192 unsigned int entry_pmap;
2194 if (!priv->prs_shadow[tid].valid ||
2195 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2196 (priv->prs_shadow[tid].udf != udf_type))
2197 continue;
2199 mvpp2_prs_init_from_hw(priv, &pe, tid);
2200 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
2202 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
2203 entry_pmap == pmap)
2204 return tid;
2205 }
2207 return -ENOENT;
2208 }
2210 /* Update parser's mac da entry */
2211 int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add)
2212 {
2213 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2214 struct mvpp2 *priv = port->priv;
2215 unsigned int pmap, len, ri;
2216 struct mvpp2_prs_entry pe;
2217 int tid;
2219 memset(&pe, 0, sizeof(pe));
2221 /* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2222 tid = mvpp2_prs_mac_da_range_find(priv, BIT(port->id), da, mask,
2223 MVPP2_PRS_UDF_MAC_DEF);
2225 /* No such entry */
2226 if (tid < 0) {
2227 if (!add)
2228 return 0;
2230 /* Create new TCAM entry */
2231 /* Go through all entries from first to last */
2232 tid = mvpp2_prs_tcam_first_free(priv,
2233 MVPP2_PE_MAC_RANGE_START,
2234 MVPP2_PE_MAC_RANGE_END);
2235 if (tid < 0)
2236 return tid;
2238 pe.index = tid;
2240 /* Mask all ports */
2241 mvpp2_prs_tcam_port_map_set(&pe, 0);
2242 } else {
2243 mvpp2_prs_init_from_hw(priv, &pe, tid);
2244 }
2246 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
2248 /* Update port mask */
2249 mvpp2_prs_tcam_port_set(&pe, port->id, add);
2251 /* Invalidate the entry if no ports are left enabled */
2252 pmap = mvpp2_prs_tcam_port_map_get(&pe);
2253 if (pmap == 0) {
2254 if (add)
2255 return -EINVAL;
2257 mvpp2_prs_hw_inv(priv, pe.index);
2258 priv->prs_shadow[pe.index].valid = false;
2259 return 0;
2260 }
2262 /* Continue - set next lookup */
2263 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
2265 /* Set match on DA */
2266 len = ETH_ALEN;
2267 while (len--)
2268 mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
2270 /* Set result info bits */
2271 if (is_broadcast_ether_addr(da)) {
2272 ri = MVPP2_PRS_RI_L2_BCAST;
2273 } else if (is_multicast_ether_addr(da)) {
2274 ri = MVPP2_PRS_RI_L2_MCAST;
2275 } else {
2276 ri = MVPP2_PRS_RI_L2_UCAST;
2278 if (ether_addr_equal(da, port->dev->dev_addr))
2279 ri |= MVPP2_PRS_RI_MAC_ME_MASK;
2280 }
2282 mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2283 MVPP2_PRS_RI_MAC_ME_MASK);
2284 mvpp2_prs_shadow_ri_set(priv, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2285 MVPP2_PRS_RI_MAC_ME_MASK);
2287 /* Shift to ethertype */
2288 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
2289 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2291 /* Update shadow table and hw entry */
2292 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
2293 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
2294 mvpp2_prs_hw_write(priv, &pe);
2296 return 0;
2297 }
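2298 /* Replace the old MAC DA parser entry with one for the new address */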
2299 int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
2300 {
2301 struct mvpp2_port *port = netdev_priv(dev);
2302 int err;
2304 /* Remove old parser entry */
2305 err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, false);
2306 if (err)
2307 return err;
2309 /* Add new parser entry */
2310 err = mvpp2_prs_mac_da_accept(port, da, true);
2311 if (err)
2312 return err;
2314 /* Set addr in the device */
2315 ether_addr_copy(dev->dev_addr, da);
2317 return 0;
2318 }
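2319 /* Remove all per-port MAC DA entries except broadcast and the device's own address */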
2320 void mvpp2_prs_mac_del_all(struct mvpp2_port *port)
2321 {
2322 struct mvpp2 *priv = port->priv;
2323 struct mvpp2_prs_entry pe;
2324 unsigned long pmap;
2325 int index, tid;
2327 for (tid = MVPP2_PE_MAC_RANGE_START;
2328 tid <= MVPP2_PE_MAC_RANGE_END; tid++) {
2329 unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
2331 if (!priv->prs_shadow[tid].valid ||
2332 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2333 (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
2334 continue;
2336 mvpp2_prs_init_from_hw(priv, &pe, tid);
2338 pmap = mvpp2_prs_tcam_port_map_get(&pe);
2340 /* We only want entries active on this port */
2341 if (!test_bit(port->id, &pmap))
2342 continue;
2344 /* Read mac addr from entry */
2345 for (index = 0; index < ETH_ALEN; index++)
2346 mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
2347 &da_mask[index]);
2349 /* Special cases: don't remove broadcast and port's own
2350 * address
2351 */
2352 if (is_broadcast_ether_addr(da) ||
2353 ether_addr_equal(da, port->dev->dev_addr))
2354 continue;
2356 /* Remove entry from TCAM */
2357 mvpp2_prs_mac_da_accept(port, da, false);
2358 }
2359 }
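2360 /* Set port's DSA tag mode: add it to the matching (E)DSA entries, remove it from the others */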
2361 int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
2362 {
2363 switch (type) {
2364 case MVPP2_TAG_TYPE_EDSA:
2365 /* Add port to EDSA entries */
2366 mvpp2_prs_dsa_tag_set(priv, port, true,
2367 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2368 mvpp2_prs_dsa_tag_set(priv, port, true,
2369 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2370 /* Remove port from DSA entries */
2371 mvpp2_prs_dsa_tag_set(priv, port, false,
2372 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2373 mvpp2_prs_dsa_tag_set(priv, port, false,
2374 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2375 break;
2377 case MVPP2_TAG_TYPE_DSA:
2378 /* Add port to DSA entries */
2379 mvpp2_prs_dsa_tag_set(priv, port, true,
2380 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2381 mvpp2_prs_dsa_tag_set(priv, port, true,
2382 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2383 /* Remove port from EDSA entries */
2384 mvpp2_prs_dsa_tag_set(priv, port, false,
2385 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2386 mvpp2_prs_dsa_tag_set(priv, port, false,
2387 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2388 break;
2390 case MVPP2_TAG_TYPE_MH:
2391 case MVPP2_TAG_TYPE_NONE:
2392 /* Remove port from EDSA and DSA entries */
2393 mvpp2_prs_dsa_tag_set(priv, port, false,
2394 MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
2395 mvpp2_prs_dsa_tag_set(priv, port, false,
2396 MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
2397 mvpp2_prs_dsa_tag_set(priv, port, false,
2398 MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
2399 mvpp2_prs_dsa_tag_set(priv, port, false,
2400 MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
2401 break;
2403 default:
2404 if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
2405 return -EINVAL;
2406 }
2408 return 0;
2409 }
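2410 /* Add a flow entry matching the given result info value */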
2411 int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask)
2412 {
2413 struct mvpp2_prs_entry pe;
2414 u8 *ri_byte, *ri_byte_mask;
2415 int tid, i;
2417 memset(&pe, 0, sizeof(pe));
2419 tid = mvpp2_prs_tcam_first_free(priv,
2420 MVPP2_PE_LAST_FREE_TID,
2421 MVPP2_PE_FIRST_FREE_TID);
2422 if (tid < 0)
2423 return tid;
2425 pe.index = tid;
2427 ri_byte = (u8 *)&ri;
2428 ri_byte_mask = (u8 *)&ri_mask;
2430 mvpp2_prs_sram_ai_update(&pe, flow, MVPP2_PRS_FLOW_ID_MASK);
2431 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
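2432 /* Match all four result info bytes in the TCAM data */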
2433 for (i = 0; i < 4; i++) {
2434 mvpp2_prs_tcam_data_byte_set(&pe, i, ri_byte[i],
2435 ri_byte_mask[i]);
2436 }
2438 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
2439 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2440 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2441 mvpp2_prs_hw_write(priv, &pe);
2443 return 0;
2444 }
2446 /* Set prs flow for the port */
2447 int mvpp2_prs_def_flow(struct mvpp2_port *port)
2448 {
2449 struct mvpp2_prs_entry pe;
2450 int tid;
2452 memset(&pe, 0, sizeof(pe));
2454 tid = mvpp2_prs_flow_find(port->priv, port->id);
2456 /* Such an entry does not exist */
2457 if (tid < 0) {
2458 /* Go through all entries from last to first */
2459 tid = mvpp2_prs_tcam_first_free(port->priv,
2460 MVPP2_PE_LAST_FREE_TID,
2461 MVPP2_PE_FIRST_FREE_TID);
2462 if (tid < 0)
2463 return tid;
2465 pe.index = tid;
2467 /* Set flow ID */
2468 mvpp2_prs_sram_ai_update(&pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2469 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2471 /* Update shadow table */
2472 mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS);
2473 } else {
2474 mvpp2_prs_init_from_hw(port->priv, &pe, tid);
2475 }
2477 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2478 mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id));
2479 mvpp2_prs_hw_write(port->priv, &pe);
2481 return 0;
2482 }
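2483 /* Read the hit counter of a tcam entry */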
2484 int mvpp2_prs_hits(struct mvpp2 *priv, int index)
2485 {
2486 u32 val;
2488 if (index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
2489 return -EINVAL;
2491 mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index);
2493 val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG);
2495 val &= MVPP2_PRS_TCAM_HIT_CNT_MASK;
2497 return val;
2498 }