// SPDX-License-Identifier: GPL-2.0
/* Header Parser helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
10 #include <linux/kernel.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/platform_device.h>
14 #include <uapi/linux/ppp_defs.h>
19 #include "mvpp2_prs.h"
21 /* Update parser tcam and sram hw entries */
22 static int mvpp2_prs_hw_write(struct mvpp2
*priv
, struct mvpp2_prs_entry
*pe
)
26 if (pe
->index
> MVPP2_PRS_TCAM_SRAM_SIZE
- 1)
29 /* Clear entry invalidation bit */
30 pe
->tcam
[MVPP2_PRS_TCAM_INV_WORD
] &= ~MVPP2_PRS_TCAM_INV_MASK
;
32 /* Write tcam index - indirect access */
33 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, pe
->index
);
34 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
35 mvpp2_write(priv
, MVPP2_PRS_TCAM_DATA_REG(i
), pe
->tcam
[i
]);
37 /* Write sram index - indirect access */
38 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, pe
->index
);
39 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
40 mvpp2_write(priv
, MVPP2_PRS_SRAM_DATA_REG(i
), pe
->sram
[i
]);
45 /* Initialize tcam entry from hw */
46 int mvpp2_prs_init_from_hw(struct mvpp2
*priv
, struct mvpp2_prs_entry
*pe
,
51 if (tid
> MVPP2_PRS_TCAM_SRAM_SIZE
- 1)
54 memset(pe
, 0, sizeof(*pe
));
57 /* Write tcam index - indirect access */
58 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, pe
->index
);
60 pe
->tcam
[MVPP2_PRS_TCAM_INV_WORD
] = mvpp2_read(priv
,
61 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD
));
62 if (pe
->tcam
[MVPP2_PRS_TCAM_INV_WORD
] & MVPP2_PRS_TCAM_INV_MASK
)
63 return MVPP2_PRS_TCAM_ENTRY_INVALID
;
65 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
66 pe
->tcam
[i
] = mvpp2_read(priv
, MVPP2_PRS_TCAM_DATA_REG(i
));
68 /* Write sram index - indirect access */
69 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, pe
->index
);
70 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
71 pe
->sram
[i
] = mvpp2_read(priv
, MVPP2_PRS_SRAM_DATA_REG(i
));
76 /* Invalidate tcam hw entry */
77 static void mvpp2_prs_hw_inv(struct mvpp2
*priv
, int index
)
79 /* Write index - indirect access */
80 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, index
);
81 mvpp2_write(priv
, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD
),
82 MVPP2_PRS_TCAM_INV_MASK
);
85 /* Enable shadow table entry and set its lookup ID */
86 static void mvpp2_prs_shadow_set(struct mvpp2
*priv
, int index
, int lu
)
88 priv
->prs_shadow
[index
].valid
= true;
89 priv
->prs_shadow
[index
].lu
= lu
;
92 /* Update ri fields in shadow table entry */
93 static void mvpp2_prs_shadow_ri_set(struct mvpp2
*priv
, int index
,
94 unsigned int ri
, unsigned int ri_mask
)
96 priv
->prs_shadow
[index
].ri_mask
= ri_mask
;
97 priv
->prs_shadow
[index
].ri
= ri
;
100 /* Update lookup field in tcam sw entry */
101 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry
*pe
, unsigned int lu
)
103 pe
->tcam
[MVPP2_PRS_TCAM_LU_WORD
] &= ~MVPP2_PRS_TCAM_LU(MVPP2_PRS_LU_MASK
);
104 pe
->tcam
[MVPP2_PRS_TCAM_LU_WORD
] &= ~MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK
);
105 pe
->tcam
[MVPP2_PRS_TCAM_LU_WORD
] |= MVPP2_PRS_TCAM_LU(lu
& MVPP2_PRS_LU_MASK
);
106 pe
->tcam
[MVPP2_PRS_TCAM_LU_WORD
] |= MVPP2_PRS_TCAM_LU_EN(MVPP2_PRS_LU_MASK
);
109 /* Update mask for single port in tcam sw entry */
110 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry
*pe
,
111 unsigned int port
, bool add
)
114 pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] &= ~MVPP2_PRS_TCAM_PORT_EN(BIT(port
));
116 pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] |= MVPP2_PRS_TCAM_PORT_EN(BIT(port
));
119 /* Update port map in tcam sw entry */
120 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry
*pe
,
123 pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] &= ~MVPP2_PRS_TCAM_PORT(MVPP2_PRS_PORT_MASK
);
124 pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] &= ~MVPP2_PRS_TCAM_PORT_EN(MVPP2_PRS_PORT_MASK
);
125 pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] |= MVPP2_PRS_TCAM_PORT_EN(~ports
& MVPP2_PRS_PORT_MASK
);
128 /* Obtain port map from tcam sw entry */
129 unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry
*pe
)
131 return (~pe
->tcam
[MVPP2_PRS_TCAM_PORT_WORD
] >> 24) & MVPP2_PRS_PORT_MASK
;
134 /* Set byte of data and its enable bits in tcam sw entry */
135 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry
*pe
,
136 unsigned int offs
, unsigned char byte
,
137 unsigned char enable
)
139 int pos
= MVPP2_PRS_BYTE_IN_WORD(offs
) * BITS_PER_BYTE
;
141 pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] &= ~(0xff << pos
);
142 pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] &= ~(MVPP2_PRS_TCAM_EN(0xff) << pos
);
143 pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] |= byte
<< pos
;
144 pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] |= MVPP2_PRS_TCAM_EN(enable
<< pos
);
147 /* Get byte of data and its enable bits from tcam sw entry */
148 void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry
*pe
,
149 unsigned int offs
, unsigned char *byte
,
150 unsigned char *enable
)
152 int pos
= MVPP2_PRS_BYTE_IN_WORD(offs
) * BITS_PER_BYTE
;
154 *byte
= (pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] >> pos
) & 0xff;
155 *enable
= (pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] >> (pos
+ 16)) & 0xff;
158 /* Compare tcam data bytes with a pattern */
159 static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry
*pe
, int offs
,
164 tcam_data
= pe
->tcam
[MVPP2_PRS_BYTE_TO_WORD(offs
)] & 0xffff;
165 return tcam_data
== data
;
168 /* Update ai bits in tcam sw entry */
169 static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry
*pe
,
170 unsigned int bits
, unsigned int enable
)
174 for (i
= 0; i
< MVPP2_PRS_AI_BITS
; i
++) {
175 if (!(enable
& BIT(i
)))
179 pe
->tcam
[MVPP2_PRS_TCAM_AI_WORD
] |= BIT(i
);
181 pe
->tcam
[MVPP2_PRS_TCAM_AI_WORD
] &= ~BIT(i
);
184 pe
->tcam
[MVPP2_PRS_TCAM_AI_WORD
] |= MVPP2_PRS_TCAM_AI_EN(enable
);
187 /* Get ai bits from tcam sw entry */
188 static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry
*pe
)
190 return pe
->tcam
[MVPP2_PRS_TCAM_AI_WORD
] & MVPP2_PRS_AI_MASK
;
193 /* Set ethertype in tcam sw entry */
194 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry
*pe
, int offset
,
195 unsigned short ethertype
)
197 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 0, ethertype
>> 8, 0xff);
198 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 1, ethertype
& 0xff, 0xff);
201 /* Set vid in tcam sw entry */
202 static void mvpp2_prs_match_vid(struct mvpp2_prs_entry
*pe
, int offset
,
205 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 0, (vid
& 0xf00) >> 8, 0xf);
206 mvpp2_prs_tcam_data_byte_set(pe
, offset
+ 1, vid
& 0xff, 0xff);
209 /* Set bits in sram sw entry */
210 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry
*pe
, int bit_num
,
213 pe
->sram
[MVPP2_BIT_TO_WORD(bit_num
)] |= (val
<< (MVPP2_BIT_IN_WORD(bit_num
)));
216 /* Clear bits in sram sw entry */
217 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry
*pe
, int bit_num
,
220 pe
->sram
[MVPP2_BIT_TO_WORD(bit_num
)] &= ~(val
<< (MVPP2_BIT_IN_WORD(bit_num
)));
223 /* Update ri bits in sram sw entry */
224 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry
*pe
,
225 unsigned int bits
, unsigned int mask
)
229 for (i
= 0; i
< MVPP2_PRS_SRAM_RI_CTRL_BITS
; i
++) {
230 if (!(mask
& BIT(i
)))
234 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_RI_OFFS
+ i
,
237 mvpp2_prs_sram_bits_clear(pe
,
238 MVPP2_PRS_SRAM_RI_OFFS
+ i
,
241 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_RI_CTRL_OFFS
+ i
, 1);
245 /* Obtain ri bits from sram sw entry */
246 static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry
*pe
)
248 return pe
->sram
[MVPP2_PRS_SRAM_RI_WORD
];
251 /* Update ai bits in sram sw entry */
252 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry
*pe
,
253 unsigned int bits
, unsigned int mask
)
257 for (i
= 0; i
< MVPP2_PRS_SRAM_AI_CTRL_BITS
; i
++) {
258 if (!(mask
& BIT(i
)))
262 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_AI_OFFS
+ i
,
265 mvpp2_prs_sram_bits_clear(pe
,
266 MVPP2_PRS_SRAM_AI_OFFS
+ i
,
269 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_AI_CTRL_OFFS
+ i
, 1);
273 /* Read ai bits from sram sw entry */
274 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry
*pe
)
277 /* ai is stored on bits 90->97; so it spreads across two u32 */
278 int ai_off
= MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_AI_OFFS
);
279 int ai_shift
= MVPP2_BIT_IN_WORD(MVPP2_PRS_SRAM_AI_OFFS
);
281 bits
= (pe
->sram
[ai_off
] >> ai_shift
) |
282 (pe
->sram
[ai_off
+ 1] << (32 - ai_shift
));
287 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
290 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry
*pe
,
293 int sram_next_off
= MVPP2_PRS_SRAM_NEXT_LU_OFFS
;
295 mvpp2_prs_sram_bits_clear(pe
, sram_next_off
,
296 MVPP2_PRS_SRAM_NEXT_LU_MASK
);
297 mvpp2_prs_sram_bits_set(pe
, sram_next_off
, lu
);
300 /* In the sram sw entry set sign and value of the next lookup offset
301 * and the offset value generated to the classifier
303 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry
*pe
, int shift
,
308 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT
, 1);
311 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT
, 1);
315 pe
->sram
[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS
)] |=
316 shift
& MVPP2_PRS_SRAM_SHIFT_MASK
;
318 /* Reset and set operation */
319 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS
,
320 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK
);
321 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS
, op
);
323 /* Set base offset as current */
324 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS
, 1);
327 /* In the sram sw entry set sign and value of the user defined offset
328 * generated to the classifier
330 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry
*pe
,
331 unsigned int type
, int offset
,
336 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_SIGN_BIT
, 1);
339 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_SIGN_BIT
, 1);
343 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_OFFS
,
344 MVPP2_PRS_SRAM_UDF_MASK
);
345 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_OFFS
,
346 offset
& MVPP2_PRS_SRAM_UDF_MASK
);
348 /* Set offset type */
349 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_UDF_TYPE_OFFS
,
350 MVPP2_PRS_SRAM_UDF_TYPE_MASK
);
351 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_UDF_TYPE_OFFS
, type
);
353 /* Set offset operation */
354 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
,
355 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK
);
356 mvpp2_prs_sram_bits_set(pe
, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS
,
357 op
& MVPP2_PRS_SRAM_OP_SEL_UDF_MASK
);
359 /* Set base offset as current */
360 mvpp2_prs_sram_bits_clear(pe
, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS
, 1);
363 /* Find parser flow entry */
364 static int mvpp2_prs_flow_find(struct mvpp2
*priv
, int flow
)
366 struct mvpp2_prs_entry pe
;
369 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */
370 for (tid
= MVPP2_PRS_TCAM_SRAM_SIZE
- 1; tid
>= 0; tid
--) {
373 if (!priv
->prs_shadow
[tid
].valid
||
374 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_FLOWS
)
377 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
378 bits
= mvpp2_prs_sram_ai_get(&pe
);
380 /* Sram store classification lookup ID in AI bits [5:0] */
381 if ((bits
& MVPP2_PRS_FLOW_ID_MASK
) == flow
)
388 /* Return first free tcam index, seeking from start to end */
389 static int mvpp2_prs_tcam_first_free(struct mvpp2
*priv
, unsigned char start
,
397 if (end
>= MVPP2_PRS_TCAM_SRAM_SIZE
)
398 end
= MVPP2_PRS_TCAM_SRAM_SIZE
- 1;
400 for (tid
= start
; tid
<= end
; tid
++) {
401 if (!priv
->prs_shadow
[tid
].valid
)
408 /* Drop flow control pause frames */
409 static void mvpp2_prs_drop_fc(struct mvpp2
*priv
)
411 unsigned char da
[ETH_ALEN
] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
412 struct mvpp2_prs_entry pe
;
415 memset(&pe
, 0, sizeof(pe
));
417 /* For all ports - drop flow control frames */
418 pe
.index
= MVPP2_PE_FC_DROP
;
419 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
421 /* Set match on DA */
424 mvpp2_prs_tcam_data_byte_set(&pe
, len
, da
[len
], 0xff);
426 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
427 MVPP2_PRS_RI_DROP_MASK
);
429 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
430 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
433 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
435 /* Update shadow table and hw entry */
436 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
437 mvpp2_prs_hw_write(priv
, &pe
);
440 /* Enable/disable dropping all mac da's */
441 static void mvpp2_prs_mac_drop_all_set(struct mvpp2
*priv
, int port
, bool add
)
443 struct mvpp2_prs_entry pe
;
445 if (priv
->prs_shadow
[MVPP2_PE_DROP_ALL
].valid
) {
446 /* Entry exist - update port only */
447 mvpp2_prs_init_from_hw(priv
, &pe
, MVPP2_PE_DROP_ALL
);
449 /* Entry doesn't exist - create new */
450 memset(&pe
, 0, sizeof(pe
));
451 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
452 pe
.index
= MVPP2_PE_DROP_ALL
;
454 /* Non-promiscuous mode for all ports - DROP unknown packets */
455 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
456 MVPP2_PRS_RI_DROP_MASK
);
458 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
459 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
461 /* Update shadow table */
462 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
465 mvpp2_prs_tcam_port_map_set(&pe
, 0);
468 /* Update port mask */
469 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
471 mvpp2_prs_hw_write(priv
, &pe
);
474 /* Set port to unicast or multicast promiscuous mode */
475 void mvpp2_prs_mac_promisc_set(struct mvpp2
*priv
, int port
,
476 enum mvpp2_prs_l2_cast l2_cast
, bool add
)
478 struct mvpp2_prs_entry pe
;
479 unsigned char cast_match
;
483 if (l2_cast
== MVPP2_PRS_L2_UNI_CAST
) {
484 cast_match
= MVPP2_PRS_UCAST_VAL
;
485 tid
= MVPP2_PE_MAC_UC_PROMISCUOUS
;
486 ri
= MVPP2_PRS_RI_L2_UCAST
;
488 cast_match
= MVPP2_PRS_MCAST_VAL
;
489 tid
= MVPP2_PE_MAC_MC_PROMISCUOUS
;
490 ri
= MVPP2_PRS_RI_L2_MCAST
;
493 /* promiscuous mode - Accept unknown unicast or multicast packets */
494 if (priv
->prs_shadow
[tid
].valid
) {
495 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
497 memset(&pe
, 0, sizeof(pe
));
498 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
501 /* Continue - set next lookup */
502 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
504 /* Set result info bits */
505 mvpp2_prs_sram_ri_update(&pe
, ri
, MVPP2_PRS_RI_L2_CAST_MASK
);
507 /* Match UC or MC addresses */
508 mvpp2_prs_tcam_data_byte_set(&pe
, 0, cast_match
,
509 MVPP2_PRS_CAST_MASK
);
511 /* Shift to ethertype */
512 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
513 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
516 mvpp2_prs_tcam_port_map_set(&pe
, 0);
518 /* Update shadow table */
519 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
522 /* Update port mask */
523 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
525 mvpp2_prs_hw_write(priv
, &pe
);
528 /* Set entry for dsa packets */
529 static void mvpp2_prs_dsa_tag_set(struct mvpp2
*priv
, int port
, bool add
,
530 bool tagged
, bool extend
)
532 struct mvpp2_prs_entry pe
;
536 tid
= tagged
? MVPP2_PE_EDSA_TAGGED
: MVPP2_PE_EDSA_UNTAGGED
;
539 tid
= tagged
? MVPP2_PE_DSA_TAGGED
: MVPP2_PE_DSA_UNTAGGED
;
543 if (priv
->prs_shadow
[tid
].valid
) {
544 /* Entry exist - update port only */
545 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
547 /* Entry doesn't exist - create new */
548 memset(&pe
, 0, sizeof(pe
));
549 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
552 /* Update shadow table */
553 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
556 /* Set tagged bit in DSA tag */
557 mvpp2_prs_tcam_data_byte_set(&pe
, 0,
558 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
559 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
561 /* Set ai bits for next iteration */
563 mvpp2_prs_sram_ai_update(&pe
, 1,
564 MVPP2_PRS_SRAM_AI_MASK
);
566 mvpp2_prs_sram_ai_update(&pe
, 0,
567 MVPP2_PRS_SRAM_AI_MASK
);
569 /* Set result info bits to 'single vlan' */
570 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_SINGLE
,
571 MVPP2_PRS_RI_VLAN_MASK
);
572 /* If packet is tagged continue check vid filtering */
573 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VID
);
575 /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag*/
576 mvpp2_prs_sram_shift_set(&pe
, shift
,
577 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
579 /* Set result info bits to 'no vlans' */
580 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
581 MVPP2_PRS_RI_VLAN_MASK
);
582 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
586 mvpp2_prs_tcam_port_map_set(&pe
, 0);
589 /* Update port mask */
590 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
592 mvpp2_prs_hw_write(priv
, &pe
);
595 /* Set entry for dsa ethertype */
596 static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2
*priv
, int port
,
597 bool add
, bool tagged
, bool extend
)
599 struct mvpp2_prs_entry pe
;
600 int tid
, shift
, port_mask
;
603 tid
= tagged
? MVPP2_PE_ETYPE_EDSA_TAGGED
:
604 MVPP2_PE_ETYPE_EDSA_UNTAGGED
;
608 tid
= tagged
? MVPP2_PE_ETYPE_DSA_TAGGED
:
609 MVPP2_PE_ETYPE_DSA_UNTAGGED
;
610 port_mask
= MVPP2_PRS_PORT_MASK
;
614 if (priv
->prs_shadow
[tid
].valid
) {
615 /* Entry exist - update port only */
616 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
618 /* Entry doesn't exist - create new */
619 memset(&pe
, 0, sizeof(pe
));
620 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
624 mvpp2_prs_match_etype(&pe
, 0, ETH_P_EDSA
);
625 mvpp2_prs_match_etype(&pe
, 2, 0);
627 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DSA_MASK
,
628 MVPP2_PRS_RI_DSA_MASK
);
629 /* Shift ethertype + 2 byte reserved + tag*/
630 mvpp2_prs_sram_shift_set(&pe
, 2 + MVPP2_ETH_TYPE_LEN
+ shift
,
631 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
633 /* Update shadow table */
634 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_DSA
);
637 /* Set tagged bit in DSA tag */
638 mvpp2_prs_tcam_data_byte_set(&pe
,
639 MVPP2_ETH_TYPE_LEN
+ 2 + 3,
640 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
,
641 MVPP2_PRS_TCAM_DSA_TAGGED_BIT
);
642 /* Clear all ai bits for next iteration */
643 mvpp2_prs_sram_ai_update(&pe
, 0,
644 MVPP2_PRS_SRAM_AI_MASK
);
645 /* If packet is tagged continue check vlans */
646 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
648 /* Set result info bits to 'no vlans' */
649 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
650 MVPP2_PRS_RI_VLAN_MASK
);
651 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
653 /* Mask/unmask all ports, depending on dsa type */
654 mvpp2_prs_tcam_port_map_set(&pe
, port_mask
);
657 /* Update port mask */
658 mvpp2_prs_tcam_port_set(&pe
, port
, add
);
660 mvpp2_prs_hw_write(priv
, &pe
);
663 /* Search for existing single/triple vlan entry */
664 static int mvpp2_prs_vlan_find(struct mvpp2
*priv
, unsigned short tpid
, int ai
)
666 struct mvpp2_prs_entry pe
;
669 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
670 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
671 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
672 unsigned int ri_bits
, ai_bits
;
675 if (!priv
->prs_shadow
[tid
].valid
||
676 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
679 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
680 match
= mvpp2_prs_tcam_data_cmp(&pe
, 0, tpid
);
685 ri_bits
= mvpp2_prs_sram_ri_get(&pe
);
686 ri_bits
&= MVPP2_PRS_RI_VLAN_MASK
;
688 /* Get current ai value from tcam */
689 ai_bits
= mvpp2_prs_tcam_ai_get(&pe
);
690 /* Clear double vlan bit */
691 ai_bits
&= ~MVPP2_PRS_DBL_VLAN_AI_BIT
;
696 if (ri_bits
== MVPP2_PRS_RI_VLAN_SINGLE
||
697 ri_bits
== MVPP2_PRS_RI_VLAN_TRIPLE
)
704 /* Add/update single/triple vlan entry */
705 static int mvpp2_prs_vlan_add(struct mvpp2
*priv
, unsigned short tpid
, int ai
,
706 unsigned int port_map
)
708 struct mvpp2_prs_entry pe
;
712 memset(&pe
, 0, sizeof(pe
));
714 tid
= mvpp2_prs_vlan_find(priv
, tpid
, ai
);
717 /* Create new tcam entry */
718 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_LAST_FREE_TID
,
719 MVPP2_PE_FIRST_FREE_TID
);
723 /* Get last double vlan tid */
724 for (tid_aux
= MVPP2_PE_LAST_FREE_TID
;
725 tid_aux
>= MVPP2_PE_FIRST_FREE_TID
; tid_aux
--) {
726 unsigned int ri_bits
;
728 if (!priv
->prs_shadow
[tid_aux
].valid
||
729 priv
->prs_shadow
[tid_aux
].lu
!= MVPP2_PRS_LU_VLAN
)
732 mvpp2_prs_init_from_hw(priv
, &pe
, tid_aux
);
733 ri_bits
= mvpp2_prs_sram_ri_get(&pe
);
734 if ((ri_bits
& MVPP2_PRS_RI_VLAN_MASK
) ==
735 MVPP2_PRS_RI_VLAN_DOUBLE
)
742 memset(&pe
, 0, sizeof(pe
));
744 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
746 mvpp2_prs_match_etype(&pe
, 0, tpid
);
748 /* VLAN tag detected, proceed with VID filtering */
749 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VID
);
751 /* Clear all ai bits for next iteration */
752 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
754 if (ai
== MVPP2_PRS_SINGLE_VLAN_AI
) {
755 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_SINGLE
,
756 MVPP2_PRS_RI_VLAN_MASK
);
758 ai
|= MVPP2_PRS_DBL_VLAN_AI_BIT
;
759 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_TRIPLE
,
760 MVPP2_PRS_RI_VLAN_MASK
);
762 mvpp2_prs_tcam_ai_update(&pe
, ai
, MVPP2_PRS_SRAM_AI_MASK
);
764 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VLAN
);
766 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
768 /* Update ports' mask */
769 mvpp2_prs_tcam_port_map_set(&pe
, port_map
);
771 mvpp2_prs_hw_write(priv
, &pe
);
776 /* Get first free double vlan ai number */
777 static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2
*priv
)
781 for (i
= 1; i
< MVPP2_PRS_DBL_VLANS_MAX
; i
++) {
782 if (!priv
->prs_double_vlans
[i
])
789 /* Search for existing double vlan entry */
790 static int mvpp2_prs_double_vlan_find(struct mvpp2
*priv
, unsigned short tpid1
,
791 unsigned short tpid2
)
793 struct mvpp2_prs_entry pe
;
796 /* Go through the all entries with MVPP2_PRS_LU_VLAN */
797 for (tid
= MVPP2_PE_FIRST_FREE_TID
;
798 tid
<= MVPP2_PE_LAST_FREE_TID
; tid
++) {
799 unsigned int ri_mask
;
802 if (!priv
->prs_shadow
[tid
].valid
||
803 priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VLAN
)
806 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
808 match
= mvpp2_prs_tcam_data_cmp(&pe
, 0, tpid1
) &&
809 mvpp2_prs_tcam_data_cmp(&pe
, 4, tpid2
);
814 ri_mask
= mvpp2_prs_sram_ri_get(&pe
) & MVPP2_PRS_RI_VLAN_MASK
;
815 if (ri_mask
== MVPP2_PRS_RI_VLAN_DOUBLE
)
822 /* Add or update double vlan entry */
823 static int mvpp2_prs_double_vlan_add(struct mvpp2
*priv
, unsigned short tpid1
,
824 unsigned short tpid2
,
825 unsigned int port_map
)
827 int tid_aux
, tid
, ai
, ret
= 0;
828 struct mvpp2_prs_entry pe
;
830 memset(&pe
, 0, sizeof(pe
));
832 tid
= mvpp2_prs_double_vlan_find(priv
, tpid1
, tpid2
);
835 /* Create new tcam entry */
836 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
837 MVPP2_PE_LAST_FREE_TID
);
841 /* Set ai value for new double vlan entry */
842 ai
= mvpp2_prs_double_vlan_ai_free_get(priv
);
846 /* Get first single/triple vlan tid */
847 for (tid_aux
= MVPP2_PE_FIRST_FREE_TID
;
848 tid_aux
<= MVPP2_PE_LAST_FREE_TID
; tid_aux
++) {
849 unsigned int ri_bits
;
851 if (!priv
->prs_shadow
[tid_aux
].valid
||
852 priv
->prs_shadow
[tid_aux
].lu
!= MVPP2_PRS_LU_VLAN
)
855 mvpp2_prs_init_from_hw(priv
, &pe
, tid_aux
);
856 ri_bits
= mvpp2_prs_sram_ri_get(&pe
);
857 ri_bits
&= MVPP2_PRS_RI_VLAN_MASK
;
858 if (ri_bits
== MVPP2_PRS_RI_VLAN_SINGLE
||
859 ri_bits
== MVPP2_PRS_RI_VLAN_TRIPLE
)
866 memset(&pe
, 0, sizeof(pe
));
867 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
870 priv
->prs_double_vlans
[ai
] = true;
872 mvpp2_prs_match_etype(&pe
, 0, tpid1
);
873 mvpp2_prs_match_etype(&pe
, 4, tpid2
);
875 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
876 /* Shift 4 bytes - skip outer vlan tag */
877 mvpp2_prs_sram_shift_set(&pe
, MVPP2_VLAN_TAG_LEN
,
878 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
879 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_DOUBLE
,
880 MVPP2_PRS_RI_VLAN_MASK
);
881 mvpp2_prs_sram_ai_update(&pe
, ai
| MVPP2_PRS_DBL_VLAN_AI_BIT
,
882 MVPP2_PRS_SRAM_AI_MASK
);
884 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VLAN
);
886 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
889 /* Update ports' mask */
890 mvpp2_prs_tcam_port_map_set(&pe
, port_map
);
891 mvpp2_prs_hw_write(priv
, &pe
);
896 /* IPv4 header parsing for fragmentation and L4 offset */
897 static int mvpp2_prs_ip4_proto(struct mvpp2
*priv
, unsigned short proto
,
898 unsigned int ri
, unsigned int ri_mask
)
900 struct mvpp2_prs_entry pe
;
903 if ((proto
!= IPPROTO_TCP
) && (proto
!= IPPROTO_UDP
) &&
904 (proto
!= IPPROTO_IGMP
))
907 /* Not fragmented packet */
908 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
909 MVPP2_PE_LAST_FREE_TID
);
913 memset(&pe
, 0, sizeof(pe
));
914 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
917 /* Set next lu to IPv4 */
918 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
919 mvpp2_prs_sram_shift_set(&pe
, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
921 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L4
,
922 sizeof(struct iphdr
) - 4,
923 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
924 mvpp2_prs_sram_ai_update(&pe
, MVPP2_PRS_IPV4_DIP_AI_BIT
,
925 MVPP2_PRS_IPV4_DIP_AI_BIT
);
926 mvpp2_prs_sram_ri_update(&pe
, ri
, ri_mask
| MVPP2_PRS_RI_IP_FRAG_MASK
);
928 mvpp2_prs_tcam_data_byte_set(&pe
, 2, 0x00,
929 MVPP2_PRS_TCAM_PROTO_MASK_L
);
930 mvpp2_prs_tcam_data_byte_set(&pe
, 3, 0x00,
931 MVPP2_PRS_TCAM_PROTO_MASK
);
933 mvpp2_prs_tcam_data_byte_set(&pe
, 5, proto
, MVPP2_PRS_TCAM_PROTO_MASK
);
934 mvpp2_prs_tcam_ai_update(&pe
, 0, MVPP2_PRS_IPV4_DIP_AI_BIT
);
935 /* Unmask all ports */
936 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
938 /* Update shadow table and hw entry */
939 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
940 mvpp2_prs_hw_write(priv
, &pe
);
942 /* Fragmented packet */
943 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
944 MVPP2_PE_LAST_FREE_TID
);
949 /* Clear ri before updating */
950 pe
.sram
[MVPP2_PRS_SRAM_RI_WORD
] = 0x0;
951 pe
.sram
[MVPP2_PRS_SRAM_RI_CTRL_WORD
] = 0x0;
952 mvpp2_prs_sram_ri_update(&pe
, ri
, ri_mask
);
954 mvpp2_prs_sram_ri_update(&pe
, ri
| MVPP2_PRS_RI_IP_FRAG_TRUE
,
955 ri_mask
| MVPP2_PRS_RI_IP_FRAG_MASK
);
957 mvpp2_prs_tcam_data_byte_set(&pe
, 2, 0x00, 0x0);
958 mvpp2_prs_tcam_data_byte_set(&pe
, 3, 0x00, 0x0);
960 /* Update shadow table and hw entry */
961 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
962 mvpp2_prs_hw_write(priv
, &pe
);
967 /* IPv4 L3 multicast or broadcast */
968 static int mvpp2_prs_ip4_cast(struct mvpp2
*priv
, unsigned short l3_cast
)
970 struct mvpp2_prs_entry pe
;
973 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
974 MVPP2_PE_LAST_FREE_TID
);
978 memset(&pe
, 0, sizeof(pe
));
979 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
983 case MVPP2_PRS_L3_MULTI_CAST
:
984 mvpp2_prs_tcam_data_byte_set(&pe
, 0, MVPP2_PRS_IPV4_MC
,
985 MVPP2_PRS_IPV4_MC_MASK
);
986 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_MCAST
,
987 MVPP2_PRS_RI_L3_ADDR_MASK
);
989 case MVPP2_PRS_L3_BROAD_CAST
:
990 mask
= MVPP2_PRS_IPV4_BC_MASK
;
991 mvpp2_prs_tcam_data_byte_set(&pe
, 0, mask
, mask
);
992 mvpp2_prs_tcam_data_byte_set(&pe
, 1, mask
, mask
);
993 mvpp2_prs_tcam_data_byte_set(&pe
, 2, mask
, mask
);
994 mvpp2_prs_tcam_data_byte_set(&pe
, 3, mask
, mask
);
995 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_BCAST
,
996 MVPP2_PRS_RI_L3_ADDR_MASK
);
1002 /* Finished: go to flowid generation */
1003 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1004 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1006 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV4_DIP_AI_BIT
,
1007 MVPP2_PRS_IPV4_DIP_AI_BIT
);
1008 /* Unmask all ports */
1009 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1011 /* Update shadow table and hw entry */
1012 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1013 mvpp2_prs_hw_write(priv
, &pe
);
1018 /* Set entries for protocols over IPv6 */
1019 static int mvpp2_prs_ip6_proto(struct mvpp2
*priv
, unsigned short proto
,
1020 unsigned int ri
, unsigned int ri_mask
)
1022 struct mvpp2_prs_entry pe
;
1025 if ((proto
!= IPPROTO_TCP
) && (proto
!= IPPROTO_UDP
) &&
1026 (proto
!= IPPROTO_ICMPV6
) && (proto
!= IPPROTO_IPIP
))
1029 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1030 MVPP2_PE_LAST_FREE_TID
);
1034 memset(&pe
, 0, sizeof(pe
));
1035 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1038 /* Finished: go to flowid generation */
1039 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1040 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1041 mvpp2_prs_sram_ri_update(&pe
, ri
, ri_mask
);
1042 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L4
,
1043 sizeof(struct ipv6hdr
) - 6,
1044 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1046 mvpp2_prs_tcam_data_byte_set(&pe
, 0, proto
, MVPP2_PRS_TCAM_PROTO_MASK
);
1047 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
,
1048 MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1049 /* Unmask all ports */
1050 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1053 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP6
);
1054 mvpp2_prs_hw_write(priv
, &pe
);
1059 /* IPv6 L3 multicast entry */
1060 static int mvpp2_prs_ip6_cast(struct mvpp2
*priv
, unsigned short l3_cast
)
1062 struct mvpp2_prs_entry pe
;
1065 if (l3_cast
!= MVPP2_PRS_L3_MULTI_CAST
)
1068 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1069 MVPP2_PE_LAST_FREE_TID
);
1073 memset(&pe
, 0, sizeof(pe
));
1074 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1077 /* Finished: go to flowid generation */
1078 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1079 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_MCAST
,
1080 MVPP2_PRS_RI_L3_ADDR_MASK
);
1081 mvpp2_prs_sram_ai_update(&pe
, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
,
1082 MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1083 /* Shift back to IPv6 NH */
1084 mvpp2_prs_sram_shift_set(&pe
, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1086 mvpp2_prs_tcam_data_byte_set(&pe
, 0, MVPP2_PRS_IPV6_MC
,
1087 MVPP2_PRS_IPV6_MC_MASK
);
1088 mvpp2_prs_tcam_ai_update(&pe
, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1089 /* Unmask all ports */
1090 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1092 /* Update shadow table and hw entry */
1093 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP6
);
1094 mvpp2_prs_hw_write(priv
, &pe
);
1099 /* Parser per-port initialization */
1100 static void mvpp2_prs_hw_port_init(struct mvpp2
*priv
, int port
, int lu_first
,
1101 int lu_max
, int offset
)
1106 val
= mvpp2_read(priv
, MVPP2_PRS_INIT_LOOKUP_REG
);
1107 val
&= ~MVPP2_PRS_PORT_LU_MASK(port
);
1108 val
|= MVPP2_PRS_PORT_LU_VAL(port
, lu_first
);
1109 mvpp2_write(priv
, MVPP2_PRS_INIT_LOOKUP_REG
, val
);
1111 /* Set maximum number of loops for packet received from port */
1112 val
= mvpp2_read(priv
, MVPP2_PRS_MAX_LOOP_REG(port
));
1113 val
&= ~MVPP2_PRS_MAX_LOOP_MASK(port
);
1114 val
|= MVPP2_PRS_MAX_LOOP_VAL(port
, lu_max
);
1115 mvpp2_write(priv
, MVPP2_PRS_MAX_LOOP_REG(port
), val
);
1117 /* Set initial offset for packet header extraction for the first
1120 val
= mvpp2_read(priv
, MVPP2_PRS_INIT_OFFS_REG(port
));
1121 val
&= ~MVPP2_PRS_INIT_OFF_MASK(port
);
1122 val
|= MVPP2_PRS_INIT_OFF_VAL(port
, offset
);
1123 mvpp2_write(priv
, MVPP2_PRS_INIT_OFFS_REG(port
), val
);
1126 /* Default flow entries initialization for all ports */
1127 static void mvpp2_prs_def_flow_init(struct mvpp2
*priv
)
1129 struct mvpp2_prs_entry pe
;
1132 for (port
= 0; port
< MVPP2_MAX_PORTS
; port
++) {
1133 memset(&pe
, 0, sizeof(pe
));
1134 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1135 pe
.index
= MVPP2_PE_FIRST_DEFAULT_FLOW
- port
;
1137 /* Mask all ports */
1138 mvpp2_prs_tcam_port_map_set(&pe
, 0);
1141 mvpp2_prs_sram_ai_update(&pe
, port
, MVPP2_PRS_FLOW_ID_MASK
);
1142 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_DONE_BIT
, 1);
1144 /* Update shadow table and hw entry */
1145 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_FLOWS
);
1146 mvpp2_prs_hw_write(priv
, &pe
);
1150 /* Set default entry for Marvell Header field */
1151 static void mvpp2_prs_mh_init(struct mvpp2
*priv
)
1153 struct mvpp2_prs_entry pe
;
1155 memset(&pe
, 0, sizeof(pe
));
1157 pe
.index
= MVPP2_PE_MH_DEFAULT
;
1158 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MH
);
1159 mvpp2_prs_sram_shift_set(&pe
, MVPP2_MH_SIZE
,
1160 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1161 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1163 /* Unmask all ports */
1164 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1166 /* Update shadow table and hw entry */
1167 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MH
);
1168 mvpp2_prs_hw_write(priv
, &pe
);
1171 /* Set default entires (place holder) for promiscuous, non-promiscuous and
1172 * multicast MAC addresses
1174 static void mvpp2_prs_mac_init(struct mvpp2
*priv
)
1176 struct mvpp2_prs_entry pe
;
1178 memset(&pe
, 0, sizeof(pe
));
1180 /* Non-promiscuous mode for all ports - DROP unknown packets */
1181 pe
.index
= MVPP2_PE_MAC_NON_PROMISCUOUS
;
1182 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
1184 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
1185 MVPP2_PRS_RI_DROP_MASK
);
1186 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1187 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1189 /* Unmask all ports */
1190 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1192 /* Update shadow table and hw entry */
1193 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1194 mvpp2_prs_hw_write(priv
, &pe
);
1196 /* Create dummy entries for drop all and promiscuous modes */
1197 mvpp2_prs_drop_fc(priv
);
1198 mvpp2_prs_mac_drop_all_set(priv
, 0, false);
1199 mvpp2_prs_mac_promisc_set(priv
, 0, MVPP2_PRS_L2_UNI_CAST
, false);
1200 mvpp2_prs_mac_promisc_set(priv
, 0, MVPP2_PRS_L2_MULTI_CAST
, false);
1203 /* Set default entries for various types of dsa packets */
1204 static void mvpp2_prs_dsa_init(struct mvpp2
*priv
)
1206 struct mvpp2_prs_entry pe
;
1208 /* None tagged EDSA entry - place holder */
1209 mvpp2_prs_dsa_tag_set(priv
, 0, false, MVPP2_PRS_UNTAGGED
,
1212 /* Tagged EDSA entry - place holder */
1213 mvpp2_prs_dsa_tag_set(priv
, 0, false, MVPP2_PRS_TAGGED
, MVPP2_PRS_EDSA
);
1215 /* None tagged DSA entry - place holder */
1216 mvpp2_prs_dsa_tag_set(priv
, 0, false, MVPP2_PRS_UNTAGGED
,
1219 /* Tagged DSA entry - place holder */
1220 mvpp2_prs_dsa_tag_set(priv
, 0, false, MVPP2_PRS_TAGGED
, MVPP2_PRS_DSA
);
1222 /* None tagged EDSA ethertype entry - place holder*/
1223 mvpp2_prs_dsa_tag_ethertype_set(priv
, 0, false,
1224 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_EDSA
);
1226 /* Tagged EDSA ethertype entry - place holder*/
1227 mvpp2_prs_dsa_tag_ethertype_set(priv
, 0, false,
1228 MVPP2_PRS_TAGGED
, MVPP2_PRS_EDSA
);
1230 /* None tagged DSA ethertype entry */
1231 mvpp2_prs_dsa_tag_ethertype_set(priv
, 0, true,
1232 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_DSA
);
1234 /* Tagged DSA ethertype entry */
1235 mvpp2_prs_dsa_tag_ethertype_set(priv
, 0, true,
1236 MVPP2_PRS_TAGGED
, MVPP2_PRS_DSA
);
1238 /* Set default entry, in case DSA or EDSA tag not found */
1239 memset(&pe
, 0, sizeof(pe
));
1240 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
1241 pe
.index
= MVPP2_PE_DSA_DEFAULT
;
1242 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1245 mvpp2_prs_sram_shift_set(&pe
, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1246 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
1248 /* Clear all sram ai bits for next iteration */
1249 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1251 /* Unmask all ports */
1252 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1254 mvpp2_prs_hw_write(priv
, &pe
);
1257 /* Initialize parser entries for VID filtering */
1258 static void mvpp2_prs_vid_init(struct mvpp2
*priv
)
1260 struct mvpp2_prs_entry pe
;
1262 memset(&pe
, 0, sizeof(pe
));
1264 /* Set default vid entry */
1265 pe
.index
= MVPP2_PE_VID_FLTR_DEFAULT
;
1266 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VID
);
1268 mvpp2_prs_tcam_ai_update(&pe
, 0, MVPP2_PRS_EDSA_VID_AI_BIT
);
1270 /* Skip VLAN header - Set offset to 4 bytes */
1271 mvpp2_prs_sram_shift_set(&pe
, MVPP2_VLAN_TAG_LEN
,
1272 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1274 /* Clear all ai bits for next iteration */
1275 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1277 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1279 /* Unmask all ports */
1280 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1282 /* Update shadow table and hw entry */
1283 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VID
);
1284 mvpp2_prs_hw_write(priv
, &pe
);
1286 /* Set default vid entry for extended DSA*/
1287 memset(&pe
, 0, sizeof(pe
));
1289 /* Set default vid entry */
1290 pe
.index
= MVPP2_PE_VID_EDSA_FLTR_DEFAULT
;
1291 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VID
);
1293 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_EDSA_VID_AI_BIT
,
1294 MVPP2_PRS_EDSA_VID_AI_BIT
);
1296 /* Skip VLAN header - Set offset to 8 bytes */
1297 mvpp2_prs_sram_shift_set(&pe
, MVPP2_VLAN_TAG_EDSA_LEN
,
1298 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1300 /* Clear all ai bits for next iteration */
1301 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1303 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1305 /* Unmask all ports */
1306 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1308 /* Update shadow table and hw entry */
1309 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VID
);
1310 mvpp2_prs_hw_write(priv
, &pe
);
1313 /* Match basic ethertypes */
1314 static int mvpp2_prs_etype_init(struct mvpp2
*priv
)
1316 struct mvpp2_prs_entry pe
;
1319 /* Ethertype: PPPoE */
1320 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1321 MVPP2_PE_LAST_FREE_TID
);
1325 memset(&pe
, 0, sizeof(pe
));
1326 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1329 mvpp2_prs_match_etype(&pe
, 0, ETH_P_PPP_SES
);
1331 mvpp2_prs_sram_shift_set(&pe
, MVPP2_PPPOE_HDR_SIZE
,
1332 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1333 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_PPPOE
);
1334 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_PPPOE_MASK
,
1335 MVPP2_PRS_RI_PPPOE_MASK
);
1337 /* Update shadow table and hw entry */
1338 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1339 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1340 priv
->prs_shadow
[pe
.index
].finish
= false;
1341 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_PPPOE_MASK
,
1342 MVPP2_PRS_RI_PPPOE_MASK
);
1343 mvpp2_prs_hw_write(priv
, &pe
);
1345 /* Ethertype: ARP */
1346 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1347 MVPP2_PE_LAST_FREE_TID
);
1351 memset(&pe
, 0, sizeof(pe
));
1352 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1355 mvpp2_prs_match_etype(&pe
, 0, ETH_P_ARP
);
1357 /* Generate flow in the next iteration*/
1358 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1359 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1360 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_ARP
,
1361 MVPP2_PRS_RI_L3_PROTO_MASK
);
1363 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1365 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1367 /* Update shadow table and hw entry */
1368 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1369 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1370 priv
->prs_shadow
[pe
.index
].finish
= true;
1371 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_L3_ARP
,
1372 MVPP2_PRS_RI_L3_PROTO_MASK
);
1373 mvpp2_prs_hw_write(priv
, &pe
);
1375 /* Ethertype: LBTD */
1376 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1377 MVPP2_PE_LAST_FREE_TID
);
1381 memset(&pe
, 0, sizeof(pe
));
1382 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1385 mvpp2_prs_match_etype(&pe
, 0, MVPP2_IP_LBDT_TYPE
);
1387 /* Generate flow in the next iteration*/
1388 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1389 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1390 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_CPU_CODE_RX_SPEC
|
1391 MVPP2_PRS_RI_UDF3_RX_SPECIAL
,
1392 MVPP2_PRS_RI_CPU_CODE_MASK
|
1393 MVPP2_PRS_RI_UDF3_MASK
);
1395 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1397 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1399 /* Update shadow table and hw entry */
1400 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1401 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1402 priv
->prs_shadow
[pe
.index
].finish
= true;
1403 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_CPU_CODE_RX_SPEC
|
1404 MVPP2_PRS_RI_UDF3_RX_SPECIAL
,
1405 MVPP2_PRS_RI_CPU_CODE_MASK
|
1406 MVPP2_PRS_RI_UDF3_MASK
);
1407 mvpp2_prs_hw_write(priv
, &pe
);
1409 /* Ethertype: IPv4 without options */
1410 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1411 MVPP2_PE_LAST_FREE_TID
);
1415 memset(&pe
, 0, sizeof(pe
));
1416 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1419 mvpp2_prs_match_etype(&pe
, 0, ETH_P_IP
);
1420 mvpp2_prs_tcam_data_byte_set(&pe
, MVPP2_ETH_TYPE_LEN
,
1421 MVPP2_PRS_IPV4_HEAD
| MVPP2_PRS_IPV4_IHL
,
1422 MVPP2_PRS_IPV4_HEAD_MASK
|
1423 MVPP2_PRS_IPV4_IHL_MASK
);
1425 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
1426 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP4
,
1427 MVPP2_PRS_RI_L3_PROTO_MASK
);
1428 /* Skip eth_type + 4 bytes of IP header */
1429 mvpp2_prs_sram_shift_set(&pe
, MVPP2_ETH_TYPE_LEN
+ 4,
1430 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1432 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1434 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1436 /* Update shadow table and hw entry */
1437 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1438 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1439 priv
->prs_shadow
[pe
.index
].finish
= false;
1440 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_L3_IP4
,
1441 MVPP2_PRS_RI_L3_PROTO_MASK
);
1442 mvpp2_prs_hw_write(priv
, &pe
);
1444 /* Ethertype: IPv4 with options */
1445 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1446 MVPP2_PE_LAST_FREE_TID
);
1452 mvpp2_prs_tcam_data_byte_set(&pe
, MVPP2_ETH_TYPE_LEN
,
1453 MVPP2_PRS_IPV4_HEAD
,
1454 MVPP2_PRS_IPV4_HEAD_MASK
);
1456 /* Clear ri before updating */
1457 pe
.sram
[MVPP2_PRS_SRAM_RI_WORD
] = 0x0;
1458 pe
.sram
[MVPP2_PRS_SRAM_RI_CTRL_WORD
] = 0x0;
1459 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP4_OPT
,
1460 MVPP2_PRS_RI_L3_PROTO_MASK
);
1462 /* Update shadow table and hw entry */
1463 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1464 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1465 priv
->prs_shadow
[pe
.index
].finish
= false;
1466 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_L3_IP4_OPT
,
1467 MVPP2_PRS_RI_L3_PROTO_MASK
);
1468 mvpp2_prs_hw_write(priv
, &pe
);
1470 /* Ethertype: IPv6 without options */
1471 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1472 MVPP2_PE_LAST_FREE_TID
);
1476 memset(&pe
, 0, sizeof(pe
));
1477 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1480 mvpp2_prs_match_etype(&pe
, 0, ETH_P_IPV6
);
1482 /* Skip DIP of IPV6 header */
1483 mvpp2_prs_sram_shift_set(&pe
, MVPP2_ETH_TYPE_LEN
+ 8 +
1484 MVPP2_MAX_L3_ADDR_SIZE
,
1485 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1486 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1487 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP6
,
1488 MVPP2_PRS_RI_L3_PROTO_MASK
);
1490 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1492 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1494 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1495 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1496 priv
->prs_shadow
[pe
.index
].finish
= false;
1497 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_L3_IP6
,
1498 MVPP2_PRS_RI_L3_PROTO_MASK
);
1499 mvpp2_prs_hw_write(priv
, &pe
);
1501 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
1502 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1503 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1504 pe
.index
= MVPP2_PE_ETH_TYPE_UN
;
1506 /* Unmask all ports */
1507 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1509 /* Generate flow in the next iteration*/
1510 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1511 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1512 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_UN
,
1513 MVPP2_PRS_RI_L3_PROTO_MASK
);
1514 /* Set L3 offset even it's unknown L3 */
1515 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1517 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1519 /* Update shadow table and hw entry */
1520 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_L2
);
1521 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_L2_DEF
;
1522 priv
->prs_shadow
[pe
.index
].finish
= true;
1523 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, MVPP2_PRS_RI_L3_UN
,
1524 MVPP2_PRS_RI_L3_PROTO_MASK
);
1525 mvpp2_prs_hw_write(priv
, &pe
);
1530 /* Configure vlan entries and detect up to 2 successive VLAN tags.
1537 static int mvpp2_prs_vlan_init(struct platform_device
*pdev
, struct mvpp2
*priv
)
1539 struct mvpp2_prs_entry pe
;
1542 priv
->prs_double_vlans
= devm_kcalloc(&pdev
->dev
, sizeof(bool),
1543 MVPP2_PRS_DBL_VLANS_MAX
,
1545 if (!priv
->prs_double_vlans
)
1548 /* Double VLAN: 0x8100, 0x88A8 */
1549 err
= mvpp2_prs_double_vlan_add(priv
, ETH_P_8021Q
, ETH_P_8021AD
,
1550 MVPP2_PRS_PORT_MASK
);
1554 /* Double VLAN: 0x8100, 0x8100 */
1555 err
= mvpp2_prs_double_vlan_add(priv
, ETH_P_8021Q
, ETH_P_8021Q
,
1556 MVPP2_PRS_PORT_MASK
);
1560 /* Single VLAN: 0x88a8 */
1561 err
= mvpp2_prs_vlan_add(priv
, ETH_P_8021AD
, MVPP2_PRS_SINGLE_VLAN_AI
,
1562 MVPP2_PRS_PORT_MASK
);
1566 /* Single VLAN: 0x8100 */
1567 err
= mvpp2_prs_vlan_add(priv
, ETH_P_8021Q
, MVPP2_PRS_SINGLE_VLAN_AI
,
1568 MVPP2_PRS_PORT_MASK
);
1572 /* Set default double vlan entry */
1573 memset(&pe
, 0, sizeof(pe
));
1574 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1575 pe
.index
= MVPP2_PE_VLAN_DBL
;
1577 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_VID
);
1579 /* Clear ai for next iterations */
1580 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
1581 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_DOUBLE
,
1582 MVPP2_PRS_RI_VLAN_MASK
);
1584 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_DBL_VLAN_AI_BIT
,
1585 MVPP2_PRS_DBL_VLAN_AI_BIT
);
1586 /* Unmask all ports */
1587 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1589 /* Update shadow table and hw entry */
1590 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VLAN
);
1591 mvpp2_prs_hw_write(priv
, &pe
);
1593 /* Set default vlan none entry */
1594 memset(&pe
, 0, sizeof(pe
));
1595 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VLAN
);
1596 pe
.index
= MVPP2_PE_VLAN_NONE
;
1598 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
1599 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_VLAN_NONE
,
1600 MVPP2_PRS_RI_VLAN_MASK
);
1602 /* Unmask all ports */
1603 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1605 /* Update shadow table and hw entry */
1606 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VLAN
);
1607 mvpp2_prs_hw_write(priv
, &pe
);
1612 /* Set entries for PPPoE ethertype */
1613 static int mvpp2_prs_pppoe_init(struct mvpp2
*priv
)
1615 struct mvpp2_prs_entry pe
;
1618 /* IPv4 over PPPoE with options */
1619 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1620 MVPP2_PE_LAST_FREE_TID
);
1624 memset(&pe
, 0, sizeof(pe
));
1625 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_PPPOE
);
1628 mvpp2_prs_match_etype(&pe
, 0, PPP_IP
);
1630 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
1631 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP4_OPT
,
1632 MVPP2_PRS_RI_L3_PROTO_MASK
);
1633 /* Skip eth_type + 4 bytes of IP header */
1634 mvpp2_prs_sram_shift_set(&pe
, MVPP2_ETH_TYPE_LEN
+ 4,
1635 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1637 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1639 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1641 /* Update shadow table and hw entry */
1642 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_PPPOE
);
1643 mvpp2_prs_hw_write(priv
, &pe
);
1645 /* IPv4 over PPPoE without options */
1646 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1647 MVPP2_PE_LAST_FREE_TID
);
1653 mvpp2_prs_tcam_data_byte_set(&pe
, MVPP2_ETH_TYPE_LEN
,
1654 MVPP2_PRS_IPV4_HEAD
| MVPP2_PRS_IPV4_IHL
,
1655 MVPP2_PRS_IPV4_HEAD_MASK
|
1656 MVPP2_PRS_IPV4_IHL_MASK
);
1658 /* Clear ri before updating */
1659 pe
.sram
[MVPP2_PRS_SRAM_RI_WORD
] = 0x0;
1660 pe
.sram
[MVPP2_PRS_SRAM_RI_CTRL_WORD
] = 0x0;
1661 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP4
,
1662 MVPP2_PRS_RI_L3_PROTO_MASK
);
1664 /* Update shadow table and hw entry */
1665 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_PPPOE
);
1666 mvpp2_prs_hw_write(priv
, &pe
);
1668 /* IPv6 over PPPoE */
1669 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1670 MVPP2_PE_LAST_FREE_TID
);
1674 memset(&pe
, 0, sizeof(pe
));
1675 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_PPPOE
);
1678 mvpp2_prs_match_etype(&pe
, 0, PPP_IPV6
);
1680 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1681 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_IP6
,
1682 MVPP2_PRS_RI_L3_PROTO_MASK
);
1683 /* Jump to DIP of IPV6 header */
1684 mvpp2_prs_sram_shift_set(&pe
, MVPP2_ETH_TYPE_LEN
+ 8 +
1685 MVPP2_MAX_L3_ADDR_SIZE
,
1686 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1688 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1690 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1692 /* Update shadow table and hw entry */
1693 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_PPPOE
);
1694 mvpp2_prs_hw_write(priv
, &pe
);
1696 /* Non-IP over PPPoE */
1697 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1698 MVPP2_PE_LAST_FREE_TID
);
1702 memset(&pe
, 0, sizeof(pe
));
1703 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_PPPOE
);
1706 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_UN
,
1707 MVPP2_PRS_RI_L3_PROTO_MASK
);
1709 /* Finished: go to flowid generation */
1710 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1711 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1712 /* Set L3 offset even if it's unknown L3 */
1713 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L3
,
1715 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1717 /* Update shadow table and hw entry */
1718 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_PPPOE
);
1719 mvpp2_prs_hw_write(priv
, &pe
);
1724 /* Initialize entries for IPv4 */
1725 static int mvpp2_prs_ip4_init(struct mvpp2
*priv
)
1727 struct mvpp2_prs_entry pe
;
1730 /* Set entries for TCP, UDP and IGMP over IPv4 */
1731 err
= mvpp2_prs_ip4_proto(priv
, IPPROTO_TCP
, MVPP2_PRS_RI_L4_TCP
,
1732 MVPP2_PRS_RI_L4_PROTO_MASK
);
1736 err
= mvpp2_prs_ip4_proto(priv
, IPPROTO_UDP
, MVPP2_PRS_RI_L4_UDP
,
1737 MVPP2_PRS_RI_L4_PROTO_MASK
);
1741 err
= mvpp2_prs_ip4_proto(priv
, IPPROTO_IGMP
,
1742 MVPP2_PRS_RI_CPU_CODE_RX_SPEC
|
1743 MVPP2_PRS_RI_UDF3_RX_SPECIAL
,
1744 MVPP2_PRS_RI_CPU_CODE_MASK
|
1745 MVPP2_PRS_RI_UDF3_MASK
);
1749 /* IPv4 Broadcast */
1750 err
= mvpp2_prs_ip4_cast(priv
, MVPP2_PRS_L3_BROAD_CAST
);
1754 /* IPv4 Multicast */
1755 err
= mvpp2_prs_ip4_cast(priv
, MVPP2_PRS_L3_MULTI_CAST
);
1759 /* Default IPv4 entry for unknown protocols */
1760 memset(&pe
, 0, sizeof(pe
));
1761 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
1762 pe
.index
= MVPP2_PE_IP4_PROTO_UN
;
1764 /* Set next lu to IPv4 */
1765 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
1766 mvpp2_prs_sram_shift_set(&pe
, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1768 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L4
,
1769 sizeof(struct iphdr
) - 4,
1770 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1771 mvpp2_prs_sram_ai_update(&pe
, MVPP2_PRS_IPV4_DIP_AI_BIT
,
1772 MVPP2_PRS_IPV4_DIP_AI_BIT
);
1773 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L4_OTHER
,
1774 MVPP2_PRS_RI_L4_PROTO_MASK
);
1776 mvpp2_prs_tcam_ai_update(&pe
, 0, MVPP2_PRS_IPV4_DIP_AI_BIT
);
1777 /* Unmask all ports */
1778 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1780 /* Update shadow table and hw entry */
1781 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1782 mvpp2_prs_hw_write(priv
, &pe
);
1784 /* Default IPv4 entry for unicast address */
1785 memset(&pe
, 0, sizeof(pe
));
1786 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP4
);
1787 pe
.index
= MVPP2_PE_IP4_ADDR_UN
;
1789 /* Finished: go to flowid generation */
1790 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1791 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1792 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_UCAST
,
1793 MVPP2_PRS_RI_L3_ADDR_MASK
);
1795 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV4_DIP_AI_BIT
,
1796 MVPP2_PRS_IPV4_DIP_AI_BIT
);
1797 /* Unmask all ports */
1798 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1800 /* Update shadow table and hw entry */
1801 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1802 mvpp2_prs_hw_write(priv
, &pe
);
1807 /* Initialize entries for IPv6 */
1808 static int mvpp2_prs_ip6_init(struct mvpp2
*priv
)
1810 struct mvpp2_prs_entry pe
;
1813 /* Set entries for TCP, UDP and ICMP over IPv6 */
1814 err
= mvpp2_prs_ip6_proto(priv
, IPPROTO_TCP
,
1815 MVPP2_PRS_RI_L4_TCP
,
1816 MVPP2_PRS_RI_L4_PROTO_MASK
);
1820 err
= mvpp2_prs_ip6_proto(priv
, IPPROTO_UDP
,
1821 MVPP2_PRS_RI_L4_UDP
,
1822 MVPP2_PRS_RI_L4_PROTO_MASK
);
1826 err
= mvpp2_prs_ip6_proto(priv
, IPPROTO_ICMPV6
,
1827 MVPP2_PRS_RI_CPU_CODE_RX_SPEC
|
1828 MVPP2_PRS_RI_UDF3_RX_SPECIAL
,
1829 MVPP2_PRS_RI_CPU_CODE_MASK
|
1830 MVPP2_PRS_RI_UDF3_MASK
);
1834 /* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
1835 /* Result Info: UDF7=1, DS lite */
1836 err
= mvpp2_prs_ip6_proto(priv
, IPPROTO_IPIP
,
1837 MVPP2_PRS_RI_UDF7_IP6_LITE
,
1838 MVPP2_PRS_RI_UDF7_MASK
);
1842 /* IPv6 multicast */
1843 err
= mvpp2_prs_ip6_cast(priv
, MVPP2_PRS_L3_MULTI_CAST
);
1847 /* Entry for checking hop limit */
1848 tid
= mvpp2_prs_tcam_first_free(priv
, MVPP2_PE_FIRST_FREE_TID
,
1849 MVPP2_PE_LAST_FREE_TID
);
1853 memset(&pe
, 0, sizeof(pe
));
1854 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1857 /* Finished: go to flowid generation */
1858 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1859 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1860 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_UN
|
1861 MVPP2_PRS_RI_DROP_MASK
,
1862 MVPP2_PRS_RI_L3_PROTO_MASK
|
1863 MVPP2_PRS_RI_DROP_MASK
);
1865 mvpp2_prs_tcam_data_byte_set(&pe
, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK
);
1866 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
,
1867 MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1869 /* Update shadow table and hw entry */
1870 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1871 mvpp2_prs_hw_write(priv
, &pe
);
1873 /* Default IPv6 entry for unknown protocols */
1874 memset(&pe
, 0, sizeof(pe
));
1875 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1876 pe
.index
= MVPP2_PE_IP6_PROTO_UN
;
1878 /* Finished: go to flowid generation */
1879 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1880 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1881 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L4_OTHER
,
1882 MVPP2_PRS_RI_L4_PROTO_MASK
);
1883 /* Set L4 offset relatively to our current place */
1884 mvpp2_prs_sram_offset_set(&pe
, MVPP2_PRS_SRAM_UDF_TYPE_L4
,
1885 sizeof(struct ipv6hdr
) - 4,
1886 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD
);
1888 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
,
1889 MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1890 /* Unmask all ports */
1891 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1893 /* Update shadow table and hw entry */
1894 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1895 mvpp2_prs_hw_write(priv
, &pe
);
1897 /* Default IPv6 entry for unknown ext protocols */
1898 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1899 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1900 pe
.index
= MVPP2_PE_IP6_EXT_PROTO_UN
;
1902 /* Finished: go to flowid generation */
1903 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
1904 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_GEN_BIT
, 1);
1905 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L4_OTHER
,
1906 MVPP2_PRS_RI_L4_PROTO_MASK
);
1908 mvpp2_prs_tcam_ai_update(&pe
, MVPP2_PRS_IPV6_EXT_AI_BIT
,
1909 MVPP2_PRS_IPV6_EXT_AI_BIT
);
1910 /* Unmask all ports */
1911 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1913 /* Update shadow table and hw entry */
1914 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP4
);
1915 mvpp2_prs_hw_write(priv
, &pe
);
1917 /* Default IPv6 entry for unicast address */
1918 memset(&pe
, 0, sizeof(struct mvpp2_prs_entry
));
1919 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1920 pe
.index
= MVPP2_PE_IP6_ADDR_UN
;
1922 /* Finished: go to IPv6 again */
1923 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_IP6
);
1924 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_L3_UCAST
,
1925 MVPP2_PRS_RI_L3_ADDR_MASK
);
1926 mvpp2_prs_sram_ai_update(&pe
, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
,
1927 MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1928 /* Shift back to IPV6 NH */
1929 mvpp2_prs_sram_shift_set(&pe
, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
1931 mvpp2_prs_tcam_ai_update(&pe
, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT
);
1932 /* Unmask all ports */
1933 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
1935 /* Update shadow table and hw entry */
1936 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_IP6
);
1937 mvpp2_prs_hw_write(priv
, &pe
);
1942 /* Find tcam entry with matched pair <vid,port> */
1943 static int mvpp2_prs_vid_range_find(struct mvpp2_port
*port
, u16 vid
, u16 mask
)
1945 unsigned char byte
[2], enable
[2];
1946 struct mvpp2_prs_entry pe
;
1950 /* Go through the all entries with MVPP2_PRS_LU_VID */
1951 for (tid
= MVPP2_PRS_VID_PORT_FIRST(port
->id
);
1952 tid
<= MVPP2_PRS_VID_PORT_LAST(port
->id
); tid
++) {
1953 if (!port
->priv
->prs_shadow
[tid
].valid
||
1954 port
->priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_VID
)
1957 mvpp2_prs_init_from_hw(port
->priv
, &pe
, tid
);
1959 mvpp2_prs_tcam_data_byte_get(&pe
, 2, &byte
[0], &enable
[0]);
1960 mvpp2_prs_tcam_data_byte_get(&pe
, 3, &byte
[1], &enable
[1]);
1962 rvid
= ((byte
[0] & 0xf) << 8) + byte
[1];
1963 rmask
= ((enable
[0] & 0xf) << 8) + enable
[1];
1965 if (rvid
!= vid
|| rmask
!= mask
)
1974 /* Write parser entry for VID filtering */
1975 int mvpp2_prs_vid_entry_add(struct mvpp2_port
*port
, u16 vid
)
1977 unsigned int vid_start
= MVPP2_PE_VID_FILT_RANGE_START
+
1978 port
->id
* MVPP2_PRS_VLAN_FILT_MAX
;
1979 unsigned int mask
= 0xfff, reg_val
, shift
;
1980 struct mvpp2
*priv
= port
->priv
;
1981 struct mvpp2_prs_entry pe
;
1984 memset(&pe
, 0, sizeof(pe
));
1986 /* Scan TCAM and see if entry with this <vid,port> already exist */
1987 tid
= mvpp2_prs_vid_range_find(port
, vid
, mask
);
1989 reg_val
= mvpp2_read(priv
, MVPP2_MH_REG(port
->id
));
1990 if (reg_val
& MVPP2_DSA_EXTENDED
)
1991 shift
= MVPP2_VLAN_TAG_EDSA_LEN
;
1993 shift
= MVPP2_VLAN_TAG_LEN
;
1998 /* Go through all entries from first to last in vlan range */
1999 tid
= mvpp2_prs_tcam_first_free(priv
, vid_start
,
2001 MVPP2_PRS_VLAN_FILT_MAX_ENTRY
);
2003 /* There isn't room for a new VID filter */
2007 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VID
);
2010 /* Mask all ports */
2011 mvpp2_prs_tcam_port_map_set(&pe
, 0);
2013 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
2016 /* Enable the current port */
2017 mvpp2_prs_tcam_port_set(&pe
, port
->id
, true);
2019 /* Continue - set next lookup */
2020 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
2022 /* Skip VLAN header - Set offset to 4 or 8 bytes */
2023 mvpp2_prs_sram_shift_set(&pe
, shift
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
2025 /* Set match on VID */
2026 mvpp2_prs_match_vid(&pe
, MVPP2_PRS_VID_TCAM_BYTE
, vid
);
2028 /* Clear all ai bits for next iteration */
2029 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
2031 /* Update shadow table */
2032 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VID
);
2033 mvpp2_prs_hw_write(priv
, &pe
);
2038 /* Write parser entry for VID filtering */
2039 void mvpp2_prs_vid_entry_remove(struct mvpp2_port
*port
, u16 vid
)
2041 struct mvpp2
*priv
= port
->priv
;
2044 /* Scan TCAM and see if entry with this <vid,port> already exist */
2045 tid
= mvpp2_prs_vid_range_find(port
, vid
, 0xfff);
2051 mvpp2_prs_hw_inv(priv
, tid
);
2052 priv
->prs_shadow
[tid
].valid
= false;
2055 /* Remove all existing VID filters on this port */
2056 void mvpp2_prs_vid_remove_all(struct mvpp2_port
*port
)
2058 struct mvpp2
*priv
= port
->priv
;
2061 for (tid
= MVPP2_PRS_VID_PORT_FIRST(port
->id
);
2062 tid
<= MVPP2_PRS_VID_PORT_LAST(port
->id
); tid
++) {
2063 if (priv
->prs_shadow
[tid
].valid
) {
2064 mvpp2_prs_hw_inv(priv
, tid
);
2065 priv
->prs_shadow
[tid
].valid
= false;
2070 /* Remove VID filering entry for this port */
2071 void mvpp2_prs_vid_disable_filtering(struct mvpp2_port
*port
)
2073 unsigned int tid
= MVPP2_PRS_VID_PORT_DFLT(port
->id
);
2074 struct mvpp2
*priv
= port
->priv
;
2076 /* Invalidate the guard entry */
2077 mvpp2_prs_hw_inv(priv
, tid
);
2079 priv
->prs_shadow
[tid
].valid
= false;
2082 /* Add guard entry that drops packets when no VID is matched on this port */
2083 void mvpp2_prs_vid_enable_filtering(struct mvpp2_port
*port
)
2085 unsigned int tid
= MVPP2_PRS_VID_PORT_DFLT(port
->id
);
2086 struct mvpp2
*priv
= port
->priv
;
2087 unsigned int reg_val
, shift
;
2088 struct mvpp2_prs_entry pe
;
2090 if (priv
->prs_shadow
[tid
].valid
)
2093 memset(&pe
, 0, sizeof(pe
));
2097 reg_val
= mvpp2_read(priv
, MVPP2_MH_REG(port
->id
));
2098 if (reg_val
& MVPP2_DSA_EXTENDED
)
2099 shift
= MVPP2_VLAN_TAG_EDSA_LEN
;
2101 shift
= MVPP2_VLAN_TAG_LEN
;
2103 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_VID
);
2105 /* Mask all ports */
2106 mvpp2_prs_tcam_port_map_set(&pe
, 0);
2108 /* Update port mask */
2109 mvpp2_prs_tcam_port_set(&pe
, port
->id
, true);
2111 /* Continue - set next lookup */
2112 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_L2
);
2114 /* Skip VLAN header - Set offset to 4 or 8 bytes */
2115 mvpp2_prs_sram_shift_set(&pe
, shift
, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
2117 /* Drop VLAN packets that don't belong to any VIDs on this port */
2118 mvpp2_prs_sram_ri_update(&pe
, MVPP2_PRS_RI_DROP_MASK
,
2119 MVPP2_PRS_RI_DROP_MASK
);
2121 /* Clear all ai bits for next iteration */
2122 mvpp2_prs_sram_ai_update(&pe
, 0, MVPP2_PRS_SRAM_AI_MASK
);
2124 /* Update shadow table */
2125 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_VID
);
2126 mvpp2_prs_hw_write(priv
, &pe
);
2129 /* Parser default initialization */
2130 int mvpp2_prs_default_init(struct platform_device
*pdev
, struct mvpp2
*priv
)
2134 /* Enable tcam table */
2135 mvpp2_write(priv
, MVPP2_PRS_TCAM_CTRL_REG
, MVPP2_PRS_TCAM_EN_MASK
);
2137 /* Clear all tcam and sram entries */
2138 for (index
= 0; index
< MVPP2_PRS_TCAM_SRAM_SIZE
; index
++) {
2139 mvpp2_write(priv
, MVPP2_PRS_TCAM_IDX_REG
, index
);
2140 for (i
= 0; i
< MVPP2_PRS_TCAM_WORDS
; i
++)
2141 mvpp2_write(priv
, MVPP2_PRS_TCAM_DATA_REG(i
), 0);
2143 mvpp2_write(priv
, MVPP2_PRS_SRAM_IDX_REG
, index
);
2144 for (i
= 0; i
< MVPP2_PRS_SRAM_WORDS
; i
++)
2145 mvpp2_write(priv
, MVPP2_PRS_SRAM_DATA_REG(i
), 0);
2148 /* Invalidate all tcam entries */
2149 for (index
= 0; index
< MVPP2_PRS_TCAM_SRAM_SIZE
; index
++)
2150 mvpp2_prs_hw_inv(priv
, index
);
2152 priv
->prs_shadow
= devm_kcalloc(&pdev
->dev
, MVPP2_PRS_TCAM_SRAM_SIZE
,
2153 sizeof(*priv
->prs_shadow
),
2155 if (!priv
->prs_shadow
)
2158 /* Always start from lookup = 0 */
2159 for (index
= 0; index
< MVPP2_MAX_PORTS
; index
++)
2160 mvpp2_prs_hw_port_init(priv
, index
, MVPP2_PRS_LU_MH
,
2161 MVPP2_PRS_PORT_LU_MAX
, 0);
2163 mvpp2_prs_def_flow_init(priv
);
2165 mvpp2_prs_mh_init(priv
);
2167 mvpp2_prs_mac_init(priv
);
2169 mvpp2_prs_dsa_init(priv
);
2171 mvpp2_prs_vid_init(priv
);
2173 err
= mvpp2_prs_etype_init(priv
);
2177 err
= mvpp2_prs_vlan_init(pdev
, priv
);
2181 err
= mvpp2_prs_pppoe_init(priv
);
2185 err
= mvpp2_prs_ip6_init(priv
);
2189 err
= mvpp2_prs_ip4_init(priv
);
2196 /* Compare MAC DA with tcam entry data */
2197 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry
*pe
,
2198 const u8
*da
, unsigned char *mask
)
2200 unsigned char tcam_byte
, tcam_mask
;
2203 for (index
= 0; index
< ETH_ALEN
; index
++) {
2204 mvpp2_prs_tcam_data_byte_get(pe
, index
, &tcam_byte
, &tcam_mask
);
2205 if (tcam_mask
!= mask
[index
])
2208 if ((tcam_mask
& tcam_byte
) != (da
[index
] & mask
[index
]))
2215 /* Find tcam entry with matched pair <MAC DA, port> */
2217 mvpp2_prs_mac_da_range_find(struct mvpp2
*priv
, int pmap
, const u8
*da
,
2218 unsigned char *mask
, int udf_type
)
2220 struct mvpp2_prs_entry pe
;
2223 /* Go through the all entires with MVPP2_PRS_LU_MAC */
2224 for (tid
= MVPP2_PE_MAC_RANGE_START
;
2225 tid
<= MVPP2_PE_MAC_RANGE_END
; tid
++) {
2226 unsigned int entry_pmap
;
2228 if (!priv
->prs_shadow
[tid
].valid
||
2229 (priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_MAC
) ||
2230 (priv
->prs_shadow
[tid
].udf
!= udf_type
))
2233 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
2234 entry_pmap
= mvpp2_prs_tcam_port_map_get(&pe
);
2236 if (mvpp2_prs_mac_range_equals(&pe
, da
, mask
) &&
2244 /* Update parser's mac da entry */
2245 int mvpp2_prs_mac_da_accept(struct mvpp2_port
*port
, const u8
*da
, bool add
)
2247 unsigned char mask
[ETH_ALEN
] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2248 struct mvpp2
*priv
= port
->priv
;
2249 unsigned int pmap
, len
, ri
;
2250 struct mvpp2_prs_entry pe
;
2253 memset(&pe
, 0, sizeof(pe
));
2255 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */
2256 tid
= mvpp2_prs_mac_da_range_find(priv
, BIT(port
->id
), da
, mask
,
2257 MVPP2_PRS_UDF_MAC_DEF
);
2264 /* Create new TCAM entry */
2265 /* Go through the all entries from first to last */
2266 tid
= mvpp2_prs_tcam_first_free(priv
,
2267 MVPP2_PE_MAC_RANGE_START
,
2268 MVPP2_PE_MAC_RANGE_END
);
2274 /* Mask all ports */
2275 mvpp2_prs_tcam_port_map_set(&pe
, 0);
2277 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
2280 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_MAC
);
2282 /* Update port mask */
2283 mvpp2_prs_tcam_port_set(&pe
, port
->id
, add
);
2285 /* Invalidate the entry if no ports are left enabled */
2286 pmap
= mvpp2_prs_tcam_port_map_get(&pe
);
2291 mvpp2_prs_hw_inv(priv
, pe
.index
);
2292 priv
->prs_shadow
[pe
.index
].valid
= false;
2296 /* Continue - set next lookup */
2297 mvpp2_prs_sram_next_lu_set(&pe
, MVPP2_PRS_LU_DSA
);
2299 /* Set match on DA */
2302 mvpp2_prs_tcam_data_byte_set(&pe
, len
, da
[len
], 0xff);
2304 /* Set result info bits */
2305 if (is_broadcast_ether_addr(da
)) {
2306 ri
= MVPP2_PRS_RI_L2_BCAST
;
2307 } else if (is_multicast_ether_addr(da
)) {
2308 ri
= MVPP2_PRS_RI_L2_MCAST
;
2310 ri
= MVPP2_PRS_RI_L2_UCAST
;
2312 if (ether_addr_equal(da
, port
->dev
->dev_addr
))
2313 ri
|= MVPP2_PRS_RI_MAC_ME_MASK
;
2316 mvpp2_prs_sram_ri_update(&pe
, ri
, MVPP2_PRS_RI_L2_CAST_MASK
|
2317 MVPP2_PRS_RI_MAC_ME_MASK
);
2318 mvpp2_prs_shadow_ri_set(priv
, pe
.index
, ri
, MVPP2_PRS_RI_L2_CAST_MASK
|
2319 MVPP2_PRS_RI_MAC_ME_MASK
);
2321 /* Shift to ethertype */
2322 mvpp2_prs_sram_shift_set(&pe
, 2 * ETH_ALEN
,
2323 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD
);
2325 /* Update shadow table and hw entry */
2326 priv
->prs_shadow
[pe
.index
].udf
= MVPP2_PRS_UDF_MAC_DEF
;
2327 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_MAC
);
2328 mvpp2_prs_hw_write(priv
, &pe
);
2333 int mvpp2_prs_update_mac_da(struct net_device
*dev
, const u8
*da
)
2335 struct mvpp2_port
*port
= netdev_priv(dev
);
2338 /* Remove old parser entry */
2339 err
= mvpp2_prs_mac_da_accept(port
, dev
->dev_addr
, false);
2343 /* Add new parser entry */
2344 err
= mvpp2_prs_mac_da_accept(port
, da
, true);
2348 /* Set addr in the device */
2349 ether_addr_copy(dev
->dev_addr
, da
);
2354 void mvpp2_prs_mac_del_all(struct mvpp2_port
*port
)
2356 struct mvpp2
*priv
= port
->priv
;
2357 struct mvpp2_prs_entry pe
;
2361 for (tid
= MVPP2_PE_MAC_RANGE_START
;
2362 tid
<= MVPP2_PE_MAC_RANGE_END
; tid
++) {
2363 unsigned char da
[ETH_ALEN
], da_mask
[ETH_ALEN
];
2365 if (!priv
->prs_shadow
[tid
].valid
||
2366 (priv
->prs_shadow
[tid
].lu
!= MVPP2_PRS_LU_MAC
) ||
2367 (priv
->prs_shadow
[tid
].udf
!= MVPP2_PRS_UDF_MAC_DEF
))
2370 mvpp2_prs_init_from_hw(priv
, &pe
, tid
);
2372 pmap
= mvpp2_prs_tcam_port_map_get(&pe
);
2374 /* We only want entries active on this port */
2375 if (!test_bit(port
->id
, &pmap
))
2378 /* Read mac addr from entry */
2379 for (index
= 0; index
< ETH_ALEN
; index
++)
2380 mvpp2_prs_tcam_data_byte_get(&pe
, index
, &da
[index
],
2383 /* Special cases : Don't remove broadcast and port's own
2386 if (is_broadcast_ether_addr(da
) ||
2387 ether_addr_equal(da
, port
->dev
->dev_addr
))
2390 /* Remove entry from TCAM */
2391 mvpp2_prs_mac_da_accept(port
, da
, false);
2395 int mvpp2_prs_tag_mode_set(struct mvpp2
*priv
, int port
, int type
)
2398 case MVPP2_TAG_TYPE_EDSA
:
2399 /* Add port to EDSA entries */
2400 mvpp2_prs_dsa_tag_set(priv
, port
, true,
2401 MVPP2_PRS_TAGGED
, MVPP2_PRS_EDSA
);
2402 mvpp2_prs_dsa_tag_set(priv
, port
, true,
2403 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_EDSA
);
2404 /* Remove port from DSA entries */
2405 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2406 MVPP2_PRS_TAGGED
, MVPP2_PRS_DSA
);
2407 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2408 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_DSA
);
2411 case MVPP2_TAG_TYPE_DSA
:
2412 /* Add port to DSA entries */
2413 mvpp2_prs_dsa_tag_set(priv
, port
, true,
2414 MVPP2_PRS_TAGGED
, MVPP2_PRS_DSA
);
2415 mvpp2_prs_dsa_tag_set(priv
, port
, true,
2416 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_DSA
);
2417 /* Remove port from EDSA entries */
2418 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2419 MVPP2_PRS_TAGGED
, MVPP2_PRS_EDSA
);
2420 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2421 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_EDSA
);
2424 case MVPP2_TAG_TYPE_MH
:
2425 case MVPP2_TAG_TYPE_NONE
:
2426 /* Remove port form EDSA and DSA entries */
2427 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2428 MVPP2_PRS_TAGGED
, MVPP2_PRS_DSA
);
2429 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2430 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_DSA
);
2431 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2432 MVPP2_PRS_TAGGED
, MVPP2_PRS_EDSA
);
2433 mvpp2_prs_dsa_tag_set(priv
, port
, false,
2434 MVPP2_PRS_UNTAGGED
, MVPP2_PRS_EDSA
);
2438 if ((type
< 0) || (type
> MVPP2_TAG_TYPE_EDSA
))
2445 int mvpp2_prs_add_flow(struct mvpp2
*priv
, int flow
, u32 ri
, u32 ri_mask
)
2447 struct mvpp2_prs_entry pe
;
2448 u8
*ri_byte
, *ri_byte_mask
;
2451 memset(&pe
, 0, sizeof(pe
));
2453 tid
= mvpp2_prs_tcam_first_free(priv
,
2454 MVPP2_PE_LAST_FREE_TID
,
2455 MVPP2_PE_FIRST_FREE_TID
);
2461 ri_byte
= (u8
*)&ri
;
2462 ri_byte_mask
= (u8
*)&ri_mask
;
2464 mvpp2_prs_sram_ai_update(&pe
, flow
, MVPP2_PRS_FLOW_ID_MASK
);
2465 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_DONE_BIT
, 1);
2467 for (i
= 0; i
< 4; i
++) {
2468 mvpp2_prs_tcam_data_byte_set(&pe
, i
, ri_byte
[i
],
2472 mvpp2_prs_shadow_set(priv
, pe
.index
, MVPP2_PRS_LU_FLOWS
);
2473 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
2474 mvpp2_prs_tcam_port_map_set(&pe
, MVPP2_PRS_PORT_MASK
);
2475 mvpp2_prs_hw_write(priv
, &pe
);
2480 /* Set prs flow for the port */
2481 int mvpp2_prs_def_flow(struct mvpp2_port
*port
)
2483 struct mvpp2_prs_entry pe
;
2486 memset(&pe
, 0, sizeof(pe
));
2488 tid
= mvpp2_prs_flow_find(port
->priv
, port
->id
);
2490 /* Such entry not exist */
2492 /* Go through the all entires from last to first */
2493 tid
= mvpp2_prs_tcam_first_free(port
->priv
,
2494 MVPP2_PE_LAST_FREE_TID
,
2495 MVPP2_PE_FIRST_FREE_TID
);
2502 mvpp2_prs_sram_ai_update(&pe
, port
->id
, MVPP2_PRS_FLOW_ID_MASK
);
2503 mvpp2_prs_sram_bits_set(&pe
, MVPP2_PRS_SRAM_LU_DONE_BIT
, 1);
2505 /* Update shadow table */
2506 mvpp2_prs_shadow_set(port
->priv
, pe
.index
, MVPP2_PRS_LU_FLOWS
);
2508 mvpp2_prs_init_from_hw(port
->priv
, &pe
, tid
);
2511 mvpp2_prs_tcam_lu_set(&pe
, MVPP2_PRS_LU_FLOWS
);
2512 mvpp2_prs_tcam_port_map_set(&pe
, (1 << port
->id
));
2513 mvpp2_prs_hw_write(port
->priv
, &pe
);
2518 int mvpp2_prs_hits(struct mvpp2
*priv
, int index
)
2522 if (index
> MVPP2_PRS_TCAM_SRAM_SIZE
)
2525 mvpp2_write(priv
, MVPP2_PRS_TCAM_HIT_IDX_REG
, index
);
2527 val
= mvpp2_read(priv
, MVPP2_PRS_TCAM_HIT_CNT_REG
);
2529 val
&= MVPP2_PRS_TCAM_HIT_CNT_MASK
;