// SPDX-License-Identifier: GPL-2.0
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include "mvpp2.h"
#include "mvpp2_cls.h"
#include "mvpp2_prs.h"
14 #define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
18 .supported_hash_opts = _opts, \
25 static const struct mvpp2_cls_flow cls_flows
[MVPP2_N_PRS_FLOWS
] = {
26 /* TCP over IPv4 flows, Not fragmented, no vlan tag */
27 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_UNTAG
,
28 MVPP22_CLS_HEK_IP4_5T
,
29 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4
|
31 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
33 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_UNTAG
,
34 MVPP22_CLS_HEK_IP4_5T
,
35 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OPT
|
37 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
39 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_UNTAG
,
40 MVPP22_CLS_HEK_IP4_5T
,
41 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OTHER
|
43 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
45 /* TCP over IPv4 flows, Not fragmented, with vlan tag */
46 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_TAG
,
47 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
48 MVPP2_PRS_RI_L3_IP4
| MVPP2_PRS_RI_L4_TCP
,
51 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_TAG
,
52 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
53 MVPP2_PRS_RI_L3_IP4_OPT
| MVPP2_PRS_RI_L4_TCP
,
56 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_NF_TAG
,
57 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
58 MVPP2_PRS_RI_L3_IP4_OTHER
| MVPP2_PRS_RI_L4_TCP
,
61 /* TCP over IPv4 flows, fragmented, no vlan tag */
62 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_UNTAG
,
63 MVPP22_CLS_HEK_IP4_2T
,
64 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4
|
66 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
68 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_UNTAG
,
69 MVPP22_CLS_HEK_IP4_2T
,
70 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OPT
|
72 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
74 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_UNTAG
,
75 MVPP22_CLS_HEK_IP4_2T
,
76 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OTHER
|
78 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
80 /* TCP over IPv4 flows, fragmented, with vlan tag */
81 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_TAG
,
82 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
83 MVPP2_PRS_RI_L3_IP4
| MVPP2_PRS_RI_L4_TCP
,
86 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_TAG
,
87 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
88 MVPP2_PRS_RI_L3_IP4_OPT
| MVPP2_PRS_RI_L4_TCP
,
91 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4
, MVPP2_FL_IP4_TCP_FRAG_TAG
,
92 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
93 MVPP2_PRS_RI_L3_IP4_OTHER
| MVPP2_PRS_RI_L4_TCP
,
96 /* UDP over IPv4 flows, Not fragmented, no vlan tag */
97 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_UNTAG
,
98 MVPP22_CLS_HEK_IP4_5T
,
99 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4
|
101 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
103 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_UNTAG
,
104 MVPP22_CLS_HEK_IP4_5T
,
105 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OPT
|
107 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
109 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_UNTAG
,
110 MVPP22_CLS_HEK_IP4_5T
,
111 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OTHER
|
113 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
115 /* UDP over IPv4 flows, Not fragmented, with vlan tag */
116 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_TAG
,
117 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
118 MVPP2_PRS_RI_L3_IP4
| MVPP2_PRS_RI_L4_UDP
,
121 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_TAG
,
122 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
123 MVPP2_PRS_RI_L3_IP4_OPT
| MVPP2_PRS_RI_L4_UDP
,
126 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_NF_TAG
,
127 MVPP22_CLS_HEK_IP4_5T
| MVPP22_CLS_HEK_TAGGED
,
128 MVPP2_PRS_RI_L3_IP4_OTHER
| MVPP2_PRS_RI_L4_UDP
,
131 /* UDP over IPv4 flows, fragmented, no vlan tag */
132 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_UNTAG
,
133 MVPP22_CLS_HEK_IP4_2T
,
134 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4
|
136 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
138 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_UNTAG
,
139 MVPP22_CLS_HEK_IP4_2T
,
140 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OPT
|
142 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
144 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_UNTAG
,
145 MVPP22_CLS_HEK_IP4_2T
,
146 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OTHER
|
148 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
150 /* UDP over IPv4 flows, fragmented, with vlan tag */
151 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_TAG
,
152 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
153 MVPP2_PRS_RI_L3_IP4
| MVPP2_PRS_RI_L4_UDP
,
156 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_TAG
,
157 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
158 MVPP2_PRS_RI_L3_IP4_OPT
| MVPP2_PRS_RI_L4_UDP
,
161 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4
, MVPP2_FL_IP4_UDP_FRAG_TAG
,
162 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
163 MVPP2_PRS_RI_L3_IP4_OTHER
| MVPP2_PRS_RI_L4_UDP
,
166 /* TCP over IPv6 flows, not fragmented, no vlan tag */
167 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_NF_UNTAG
,
168 MVPP22_CLS_HEK_IP6_5T
,
169 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
|
171 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
173 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_NF_UNTAG
,
174 MVPP22_CLS_HEK_IP6_5T
,
175 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6_EXT
|
177 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
179 /* TCP over IPv6 flows, not fragmented, with vlan tag */
180 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_NF_TAG
,
181 MVPP22_CLS_HEK_IP6_5T
| MVPP22_CLS_HEK_TAGGED
,
182 MVPP2_PRS_RI_L3_IP6
| MVPP2_PRS_RI_L4_TCP
,
185 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_NF_TAG
,
186 MVPP22_CLS_HEK_IP6_5T
| MVPP22_CLS_HEK_TAGGED
,
187 MVPP2_PRS_RI_L3_IP6_EXT
| MVPP2_PRS_RI_L4_TCP
,
190 /* TCP over IPv6 flows, fragmented, no vlan tag */
191 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_FRAG_UNTAG
,
192 MVPP22_CLS_HEK_IP6_2T
,
193 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
|
194 MVPP2_PRS_RI_IP_FRAG_TRUE
| MVPP2_PRS_RI_L4_TCP
,
195 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
197 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_FRAG_UNTAG
,
198 MVPP22_CLS_HEK_IP6_2T
,
199 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6_EXT
|
200 MVPP2_PRS_RI_IP_FRAG_TRUE
| MVPP2_PRS_RI_L4_TCP
,
201 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
203 /* TCP over IPv6 flows, fragmented, with vlan tag */
204 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_FRAG_TAG
,
205 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
206 MVPP2_PRS_RI_L3_IP6
| MVPP2_PRS_RI_IP_FRAG_TRUE
|
210 MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6
, MVPP2_FL_IP6_TCP_FRAG_TAG
,
211 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
212 MVPP2_PRS_RI_L3_IP6_EXT
| MVPP2_PRS_RI_IP_FRAG_TRUE
|
216 /* UDP over IPv6 flows, not fragmented, no vlan tag */
217 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_NF_UNTAG
,
218 MVPP22_CLS_HEK_IP6_5T
,
219 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
|
221 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
223 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_NF_UNTAG
,
224 MVPP22_CLS_HEK_IP6_5T
,
225 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6_EXT
|
227 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
229 /* UDP over IPv6 flows, not fragmented, with vlan tag */
230 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_NF_TAG
,
231 MVPP22_CLS_HEK_IP6_5T
| MVPP22_CLS_HEK_TAGGED
,
232 MVPP2_PRS_RI_L3_IP6
| MVPP2_PRS_RI_L4_UDP
,
235 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_NF_TAG
,
236 MVPP22_CLS_HEK_IP6_5T
| MVPP22_CLS_HEK_TAGGED
,
237 MVPP2_PRS_RI_L3_IP6_EXT
| MVPP2_PRS_RI_L4_UDP
,
240 /* UDP over IPv6 flows, fragmented, no vlan tag */
241 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_FRAG_UNTAG
,
242 MVPP22_CLS_HEK_IP6_2T
,
243 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
|
244 MVPP2_PRS_RI_IP_FRAG_TRUE
| MVPP2_PRS_RI_L4_UDP
,
245 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
247 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_FRAG_UNTAG
,
248 MVPP22_CLS_HEK_IP6_2T
,
249 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6_EXT
|
250 MVPP2_PRS_RI_IP_FRAG_TRUE
| MVPP2_PRS_RI_L4_UDP
,
251 MVPP2_PRS_IP_MASK
| MVPP2_PRS_RI_VLAN_MASK
),
253 /* UDP over IPv6 flows, fragmented, with vlan tag */
254 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_FRAG_TAG
,
255 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
256 MVPP2_PRS_RI_L3_IP6
| MVPP2_PRS_RI_IP_FRAG_TRUE
|
260 MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6
, MVPP2_FL_IP6_UDP_FRAG_TAG
,
261 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
262 MVPP2_PRS_RI_L3_IP6_EXT
| MVPP2_PRS_RI_IP_FRAG_TRUE
|
266 /* IPv4 flows, no vlan tag */
267 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_UNTAG
,
268 MVPP22_CLS_HEK_IP4_2T
,
269 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4
,
270 MVPP2_PRS_RI_VLAN_MASK
| MVPP2_PRS_RI_L3_PROTO_MASK
),
271 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_UNTAG
,
272 MVPP22_CLS_HEK_IP4_2T
,
273 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OPT
,
274 MVPP2_PRS_RI_VLAN_MASK
| MVPP2_PRS_RI_L3_PROTO_MASK
),
275 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_UNTAG
,
276 MVPP22_CLS_HEK_IP4_2T
,
277 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP4_OTHER
,
278 MVPP2_PRS_RI_VLAN_MASK
| MVPP2_PRS_RI_L3_PROTO_MASK
),
280 /* IPv4 flows, with vlan tag */
281 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_TAG
,
282 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
284 MVPP2_PRS_RI_L3_PROTO_MASK
),
285 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_TAG
,
286 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
287 MVPP2_PRS_RI_L3_IP4_OPT
,
288 MVPP2_PRS_RI_L3_PROTO_MASK
),
289 MVPP2_DEF_FLOW(MVPP22_FLOW_IP4
, MVPP2_FL_IP4_TAG
,
290 MVPP22_CLS_HEK_IP4_2T
| MVPP22_CLS_HEK_TAGGED
,
291 MVPP2_PRS_RI_L3_IP4_OTHER
,
292 MVPP2_PRS_RI_L3_PROTO_MASK
),
294 /* IPv6 flows, no vlan tag */
295 MVPP2_DEF_FLOW(MVPP22_FLOW_IP6
, MVPP2_FL_IP6_UNTAG
,
296 MVPP22_CLS_HEK_IP6_2T
,
297 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
,
298 MVPP2_PRS_RI_VLAN_MASK
| MVPP2_PRS_RI_L3_PROTO_MASK
),
299 MVPP2_DEF_FLOW(MVPP22_FLOW_IP6
, MVPP2_FL_IP6_UNTAG
,
300 MVPP22_CLS_HEK_IP6_2T
,
301 MVPP2_PRS_RI_VLAN_NONE
| MVPP2_PRS_RI_L3_IP6
,
302 MVPP2_PRS_RI_VLAN_MASK
| MVPP2_PRS_RI_L3_PROTO_MASK
),
304 /* IPv6 flows, with vlan tag */
305 MVPP2_DEF_FLOW(MVPP22_FLOW_IP6
, MVPP2_FL_IP6_TAG
,
306 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
308 MVPP2_PRS_RI_L3_PROTO_MASK
),
309 MVPP2_DEF_FLOW(MVPP22_FLOW_IP6
, MVPP2_FL_IP6_TAG
,
310 MVPP22_CLS_HEK_IP6_2T
| MVPP22_CLS_HEK_TAGGED
,
312 MVPP2_PRS_RI_L3_PROTO_MASK
),
314 /* Non IP flow, no vlan tag */
315 MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET
, MVPP2_FL_NON_IP_UNTAG
,
317 MVPP2_PRS_RI_VLAN_NONE
,
318 MVPP2_PRS_RI_VLAN_MASK
),
319 /* Non IP flow, with vlan tag */
320 MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET
, MVPP2_FL_NON_IP_TAG
,
321 MVPP22_CLS_HEK_OPT_VLAN
,
325 u32
mvpp2_cls_flow_hits(struct mvpp2
*priv
, int index
)
327 mvpp2_write(priv
, MVPP2_CTRS_IDX
, index
);
329 return mvpp2_read(priv
, MVPP2_CLS_FLOW_TBL_HIT_CTR
);
332 void mvpp2_cls_flow_read(struct mvpp2
*priv
, int index
,
333 struct mvpp2_cls_flow_entry
*fe
)
336 mvpp2_write(priv
, MVPP2_CLS_FLOW_INDEX_REG
, index
);
337 fe
->data
[0] = mvpp2_read(priv
, MVPP2_CLS_FLOW_TBL0_REG
);
338 fe
->data
[1] = mvpp2_read(priv
, MVPP2_CLS_FLOW_TBL1_REG
);
339 fe
->data
[2] = mvpp2_read(priv
, MVPP2_CLS_FLOW_TBL2_REG
);
342 /* Update classification flow table registers */
343 static void mvpp2_cls_flow_write(struct mvpp2
*priv
,
344 struct mvpp2_cls_flow_entry
*fe
)
346 mvpp2_write(priv
, MVPP2_CLS_FLOW_INDEX_REG
, fe
->index
);
347 mvpp2_write(priv
, MVPP2_CLS_FLOW_TBL0_REG
, fe
->data
[0]);
348 mvpp2_write(priv
, MVPP2_CLS_FLOW_TBL1_REG
, fe
->data
[1]);
349 mvpp2_write(priv
, MVPP2_CLS_FLOW_TBL2_REG
, fe
->data
[2]);
352 u32
mvpp2_cls_lookup_hits(struct mvpp2
*priv
, int index
)
354 mvpp2_write(priv
, MVPP2_CTRS_IDX
, index
);
356 return mvpp2_read(priv
, MVPP2_CLS_DEC_TBL_HIT_CTR
);
359 void mvpp2_cls_lookup_read(struct mvpp2
*priv
, int lkpid
, int way
,
360 struct mvpp2_cls_lookup_entry
*le
)
364 val
= (way
<< MVPP2_CLS_LKP_INDEX_WAY_OFFS
) | lkpid
;
365 mvpp2_write(priv
, MVPP2_CLS_LKP_INDEX_REG
, val
);
368 le
->data
= mvpp2_read(priv
, MVPP2_CLS_LKP_TBL_REG
);
371 /* Update classification lookup table register */
372 static void mvpp2_cls_lookup_write(struct mvpp2
*priv
,
373 struct mvpp2_cls_lookup_entry
*le
)
377 val
= (le
->way
<< MVPP2_CLS_LKP_INDEX_WAY_OFFS
) | le
->lkpid
;
378 mvpp2_write(priv
, MVPP2_CLS_LKP_INDEX_REG
, val
);
379 mvpp2_write(priv
, MVPP2_CLS_LKP_TBL_REG
, le
->data
);
382 /* Operations on flow entry */
383 static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry
*fe
)
385 return fe
->data
[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK
;
388 static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry
*fe
,
391 fe
->data
[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK
;
392 fe
->data
[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields
);
395 static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry
*fe
,
398 return (fe
->data
[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index
)) &
399 MVPP2_CLS_FLOW_TBL2_FLD_MASK
;
402 static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry
*fe
,
403 int field_index
, int field_id
)
405 fe
->data
[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index
,
406 MVPP2_CLS_FLOW_TBL2_FLD_MASK
);
407 fe
->data
[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index
, field_id
);
410 static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry
*fe
,
413 fe
->data
[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK
);
414 fe
->data
[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine
);
417 int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry
*fe
)
419 return (fe
->data
[0] >> MVPP2_CLS_FLOW_TBL0_OFFS
) &
420 MVPP2_CLS_FLOW_TBL0_ENG_MASK
;
423 static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry
*fe
,
427 fe
->data
[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL
;
429 fe
->data
[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL
;
432 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry
*fe
,
435 fe
->data
[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST
;
436 fe
->data
[0] |= !!is_last
;
439 static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry
*fe
, int prio
)
441 fe
->data
[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK
);
442 fe
->data
[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio
);
445 static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry
*fe
,
448 fe
->data
[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port
);
451 static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry
*fe
,
454 fe
->data
[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port
);
457 static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry
*fe
,
460 fe
->data
[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK
);
461 fe
->data
[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type
);
464 /* Initialize the parser entry for the given flow */
465 static void mvpp2_cls_flow_prs_init(struct mvpp2
*priv
,
466 const struct mvpp2_cls_flow
*flow
)
468 mvpp2_prs_add_flow(priv
, flow
->flow_id
, flow
->prs_ri
.ri
,
469 flow
->prs_ri
.ri_mask
);
472 /* Initialize the Lookup Id table entry for the given flow */
473 static void mvpp2_cls_flow_lkp_init(struct mvpp2
*priv
,
474 const struct mvpp2_cls_flow
*flow
)
476 struct mvpp2_cls_lookup_entry le
;
479 le
.lkpid
= flow
->flow_id
;
481 /* The default RxQ for this port is set in the C2 lookup */
484 /* We point on the first lookup in the sequence for the flow, that is
487 le
.data
|= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow
->flow_id
));
489 /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
490 le
.data
|= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK
;
492 mvpp2_cls_lookup_write(priv
, &le
);
495 static void mvpp2_cls_c2_write(struct mvpp2
*priv
,
496 struct mvpp2_cls_c2_entry
*c2
)
499 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_IDX
, c2
->index
);
501 val
= mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_INV
);
503 val
&= ~MVPP22_CLS_C2_TCAM_INV_BIT
;
505 val
|= MVPP22_CLS_C2_TCAM_INV_BIT
;
506 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_INV
, val
);
508 mvpp2_write(priv
, MVPP22_CLS_C2_ACT
, c2
->act
);
510 mvpp2_write(priv
, MVPP22_CLS_C2_ATTR0
, c2
->attr
[0]);
511 mvpp2_write(priv
, MVPP22_CLS_C2_ATTR1
, c2
->attr
[1]);
512 mvpp2_write(priv
, MVPP22_CLS_C2_ATTR2
, c2
->attr
[2]);
513 mvpp2_write(priv
, MVPP22_CLS_C2_ATTR3
, c2
->attr
[3]);
515 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_DATA0
, c2
->tcam
[0]);
516 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_DATA1
, c2
->tcam
[1]);
517 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_DATA2
, c2
->tcam
[2]);
518 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_DATA3
, c2
->tcam
[3]);
519 /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
520 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_DATA4
, c2
->tcam
[4]);
523 void mvpp2_cls_c2_read(struct mvpp2
*priv
, int index
,
524 struct mvpp2_cls_c2_entry
*c2
)
527 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_IDX
, index
);
531 c2
->tcam
[0] = mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_DATA0
);
532 c2
->tcam
[1] = mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_DATA1
);
533 c2
->tcam
[2] = mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_DATA2
);
534 c2
->tcam
[3] = mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_DATA3
);
535 c2
->tcam
[4] = mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_DATA4
);
537 c2
->act
= mvpp2_read(priv
, MVPP22_CLS_C2_ACT
);
539 c2
->attr
[0] = mvpp2_read(priv
, MVPP22_CLS_C2_ATTR0
);
540 c2
->attr
[1] = mvpp2_read(priv
, MVPP22_CLS_C2_ATTR1
);
541 c2
->attr
[2] = mvpp2_read(priv
, MVPP22_CLS_C2_ATTR2
);
542 c2
->attr
[3] = mvpp2_read(priv
, MVPP22_CLS_C2_ATTR3
);
544 val
= mvpp2_read(priv
, MVPP22_CLS_C2_TCAM_INV
);
545 c2
->valid
= !(val
& MVPP22_CLS_C2_TCAM_INV_BIT
);
548 static int mvpp2_cls_ethtool_flow_to_type(int flow_type
)
550 switch (flow_type
& ~(FLOW_EXT
| FLOW_MAC_EXT
| FLOW_RSS
)) {
552 return MVPP22_FLOW_ETHERNET
;
554 return MVPP22_FLOW_TCP4
;
556 return MVPP22_FLOW_TCP6
;
558 return MVPP22_FLOW_UDP4
;
560 return MVPP22_FLOW_UDP6
;
562 return MVPP22_FLOW_IP4
;
564 return MVPP22_FLOW_IP6
;
570 static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port
*port
, int loc
)
572 return MVPP22_CLS_C2_RFS_LOC(port
->id
, loc
);
575 /* Initialize the flow table entries for the given flow */
576 static void mvpp2_cls_flow_init(struct mvpp2
*priv
,
577 const struct mvpp2_cls_flow
*flow
)
579 struct mvpp2_cls_flow_entry fe
;
582 /* Assign default values to all entries in the flow */
583 for (i
= MVPP2_CLS_FLT_FIRST(flow
->flow_id
);
584 i
<= MVPP2_CLS_FLT_LAST(flow
->flow_id
); i
++) {
585 memset(&fe
, 0, sizeof(fe
));
587 mvpp2_cls_flow_pri_set(&fe
, pri
++);
589 if (i
== MVPP2_CLS_FLT_LAST(flow
->flow_id
))
590 mvpp2_cls_flow_last_set(&fe
, 1);
592 mvpp2_cls_flow_write(priv
, &fe
);
595 /* RSS config C2 lookup */
596 mvpp2_cls_flow_read(priv
, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow
->flow_id
),
599 mvpp2_cls_flow_eng_set(&fe
, MVPP22_CLS_ENGINE_C2
);
600 mvpp2_cls_flow_port_id_sel(&fe
, true);
601 mvpp2_cls_flow_lu_type_set(&fe
, MVPP22_CLS_LU_TYPE_ALL
);
604 for (i
= 0; i
< MVPP2_MAX_PORTS
; i
++)
605 mvpp2_cls_flow_port_add(&fe
, BIT(i
));
607 mvpp2_cls_flow_write(priv
, &fe
);
610 for (i
= 0; i
< MVPP2_MAX_PORTS
; i
++) {
611 mvpp2_cls_flow_read(priv
,
612 MVPP2_CLS_FLT_HASH_ENTRY(i
, flow
->flow_id
),
615 /* Set a default engine. Will be overwritten when setting the
616 * real HEK parameters
618 mvpp2_cls_flow_eng_set(&fe
, MVPP22_CLS_ENGINE_C3HA
);
619 mvpp2_cls_flow_port_id_sel(&fe
, true);
620 mvpp2_cls_flow_port_add(&fe
, BIT(i
));
622 mvpp2_cls_flow_write(priv
, &fe
);
626 /* Adds a field to the Header Extracted Key generation parameters*/
627 static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry
*fe
,
630 int nb_fields
= mvpp2_cls_flow_hek_num_get(fe
);
632 if (nb_fields
== MVPP2_FLOW_N_FIELDS
)
635 mvpp2_cls_flow_hek_set(fe
, nb_fields
, field_id
);
637 mvpp2_cls_flow_hek_num_set(fe
, nb_fields
+ 1);
642 static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry
*fe
,
643 unsigned long hash_opts
)
648 /* Clear old fields */
649 mvpp2_cls_flow_hek_num_set(fe
, 0);
652 for_each_set_bit(i
, &hash_opts
, MVPP22_CLS_HEK_N_FIELDS
) {
654 case MVPP22_CLS_HEK_OPT_MAC_DA
:
655 field_id
= MVPP22_CLS_FIELD_MAC_DA
;
657 case MVPP22_CLS_HEK_OPT_VLAN
:
658 field_id
= MVPP22_CLS_FIELD_VLAN
;
660 case MVPP22_CLS_HEK_OPT_VLAN_PRI
:
661 field_id
= MVPP22_CLS_FIELD_VLAN_PRI
;
663 case MVPP22_CLS_HEK_OPT_IP4SA
:
664 field_id
= MVPP22_CLS_FIELD_IP4SA
;
666 case MVPP22_CLS_HEK_OPT_IP4DA
:
667 field_id
= MVPP22_CLS_FIELD_IP4DA
;
669 case MVPP22_CLS_HEK_OPT_IP6SA
:
670 field_id
= MVPP22_CLS_FIELD_IP6SA
;
672 case MVPP22_CLS_HEK_OPT_IP6DA
:
673 field_id
= MVPP22_CLS_FIELD_IP6DA
;
675 case MVPP22_CLS_HEK_OPT_L4SIP
:
676 field_id
= MVPP22_CLS_FIELD_L4SIP
;
678 case MVPP22_CLS_HEK_OPT_L4DIP
:
679 field_id
= MVPP22_CLS_FIELD_L4DIP
;
684 if (mvpp2_flow_add_hek_field(fe
, field_id
))
691 /* Returns the size, in bits, of the corresponding HEK field */
692 static int mvpp2_cls_hek_field_size(u32 field
)
695 case MVPP22_CLS_HEK_OPT_MAC_DA
:
697 case MVPP22_CLS_HEK_OPT_VLAN
:
699 case MVPP22_CLS_HEK_OPT_VLAN_PRI
:
701 case MVPP22_CLS_HEK_OPT_IP4SA
:
702 case MVPP22_CLS_HEK_OPT_IP4DA
:
704 case MVPP22_CLS_HEK_OPT_IP6SA
:
705 case MVPP22_CLS_HEK_OPT_IP6DA
:
707 case MVPP22_CLS_HEK_OPT_L4SIP
:
708 case MVPP22_CLS_HEK_OPT_L4DIP
:
715 const struct mvpp2_cls_flow
*mvpp2_cls_flow_get(int flow
)
717 if (flow
>= MVPP2_N_PRS_FLOWS
)
720 return &cls_flows
[flow
];
723 /* Set the hash generation options for the given traffic flow.
724 * One traffic flow (in the ethtool sense) has multiple classification flows,
725 * to handle specific cases such as fragmentation, or the presence of a
728 * Each of these individual flows has different constraints, for example we
729 * can't hash fragmented packets on L4 data (else we would risk having packet
730 * re-ordering), so each classification flows masks the options with their
734 static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port
*port
, int flow_type
,
737 const struct mvpp2_cls_flow
*flow
;
738 struct mvpp2_cls_flow_entry fe
;
739 int i
, engine
, flow_index
;
742 for_each_cls_flow_id_with_type(i
, flow_type
) {
743 flow
= mvpp2_cls_flow_get(i
);
747 flow_index
= MVPP2_CLS_FLT_HASH_ENTRY(port
->id
, flow
->flow_id
);
749 mvpp2_cls_flow_read(port
->priv
, flow_index
, &fe
);
751 hash_opts
= flow
->supported_hash_opts
& requested_opts
;
753 /* Use C3HB engine to access L4 infos. This adds L4 infos to the
756 if (hash_opts
& MVPP22_CLS_HEK_L4_OPTS
)
757 engine
= MVPP22_CLS_ENGINE_C3HB
;
759 engine
= MVPP22_CLS_ENGINE_C3HA
;
761 if (mvpp2_flow_set_hek_fields(&fe
, hash_opts
))
764 mvpp2_cls_flow_eng_set(&fe
, engine
);
766 mvpp2_cls_flow_write(port
->priv
, &fe
);
772 u16
mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry
*fe
)
775 int n_fields
, i
, field
;
777 n_fields
= mvpp2_cls_flow_hek_num_get(fe
);
779 for (i
= 0; i
< n_fields
; i
++) {
780 field
= mvpp2_cls_flow_hek_get(fe
, i
);
783 case MVPP22_CLS_FIELD_MAC_DA
:
784 hash_opts
|= MVPP22_CLS_HEK_OPT_MAC_DA
;
786 case MVPP22_CLS_FIELD_VLAN
:
787 hash_opts
|= MVPP22_CLS_HEK_OPT_VLAN
;
789 case MVPP22_CLS_FIELD_VLAN_PRI
:
790 hash_opts
|= MVPP22_CLS_HEK_OPT_VLAN_PRI
;
792 case MVPP22_CLS_FIELD_L3_PROTO
:
793 hash_opts
|= MVPP22_CLS_HEK_OPT_L3_PROTO
;
795 case MVPP22_CLS_FIELD_IP4SA
:
796 hash_opts
|= MVPP22_CLS_HEK_OPT_IP4SA
;
798 case MVPP22_CLS_FIELD_IP4DA
:
799 hash_opts
|= MVPP22_CLS_HEK_OPT_IP4DA
;
801 case MVPP22_CLS_FIELD_IP6SA
:
802 hash_opts
|= MVPP22_CLS_HEK_OPT_IP6SA
;
804 case MVPP22_CLS_FIELD_IP6DA
:
805 hash_opts
|= MVPP22_CLS_HEK_OPT_IP6DA
;
807 case MVPP22_CLS_FIELD_L4SIP
:
808 hash_opts
|= MVPP22_CLS_HEK_OPT_L4SIP
;
810 case MVPP22_CLS_FIELD_L4DIP
:
811 hash_opts
|= MVPP22_CLS_HEK_OPT_L4DIP
;
820 /* Returns the hash opts for this flow. There are several classifier flows
821 * for one traffic flow, this returns an aggregation of all configurations.
823 static u16
mvpp2_port_rss_hash_opts_get(struct mvpp2_port
*port
, int flow_type
)
825 const struct mvpp2_cls_flow
*flow
;
826 struct mvpp2_cls_flow_entry fe
;
830 for_each_cls_flow_id_with_type(i
, flow_type
) {
831 flow
= mvpp2_cls_flow_get(i
);
835 flow_index
= MVPP2_CLS_FLT_HASH_ENTRY(port
->id
, flow
->flow_id
);
837 mvpp2_cls_flow_read(port
->priv
, flow_index
, &fe
);
839 hash_opts
|= mvpp2_flow_get_hek_fields(&fe
);
845 static void mvpp2_cls_port_init_flows(struct mvpp2
*priv
)
847 const struct mvpp2_cls_flow
*flow
;
850 for (i
= 0; i
< MVPP2_N_PRS_FLOWS
; i
++) {
851 flow
= mvpp2_cls_flow_get(i
);
855 mvpp2_cls_flow_prs_init(priv
, flow
);
856 mvpp2_cls_flow_lkp_init(priv
, flow
);
857 mvpp2_cls_flow_init(priv
, flow
);
861 static void mvpp2_port_c2_cls_init(struct mvpp2_port
*port
)
863 struct mvpp2_cls_c2_entry c2
;
866 memset(&c2
, 0, sizeof(c2
));
868 c2
.index
= MVPP22_CLS_C2_RSS_ENTRY(port
->id
);
870 pmap
= BIT(port
->id
);
871 c2
.tcam
[4] = MVPP22_CLS_C2_PORT_ID(pmap
);
872 c2
.tcam
[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap
));
874 /* Match on Lookup Type */
875 c2
.tcam
[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK
));
876 c2
.tcam
[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL
);
878 /* Update RSS status after matching this entry */
879 c2
.act
= MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK
);
881 /* Mark packet as "forwarded to software", needed for RSS */
882 c2
.act
|= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK
);
884 /* Configure the default rx queue : Update Queue Low and Queue High, but
885 * don't lock, since the rx queue selection might be overridden by RSS
887 c2
.act
|= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD
) |
888 MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD
);
890 qh
= (port
->first_rxq
>> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK
;
891 ql
= port
->first_rxq
& MVPP22_CLS_C2_ATTR0_QLOW_MASK
;
893 c2
.attr
[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh
) |
894 MVPP22_CLS_C2_ATTR0_QLOW(ql
);
898 mvpp2_cls_c2_write(port
->priv
, &c2
);
901 /* Classifier default initialization */
902 void mvpp2_cls_init(struct mvpp2
*priv
)
904 struct mvpp2_cls_lookup_entry le
;
905 struct mvpp2_cls_flow_entry fe
;
906 struct mvpp2_cls_c2_entry c2
;
909 /* Enable classifier */
910 mvpp2_write(priv
, MVPP2_CLS_MODE_REG
, MVPP2_CLS_MODE_ACTIVE_MASK
);
912 /* Clear classifier flow table */
913 memset(&fe
.data
, 0, sizeof(fe
.data
));
914 for (index
= 0; index
< MVPP2_CLS_FLOWS_TBL_SIZE
; index
++) {
916 mvpp2_cls_flow_write(priv
, &fe
);
919 /* Clear classifier lookup table */
921 for (index
= 0; index
< MVPP2_CLS_LKP_TBL_SIZE
; index
++) {
924 mvpp2_cls_lookup_write(priv
, &le
);
927 mvpp2_cls_lookup_write(priv
, &le
);
930 /* Clear C2 TCAM engine table */
931 memset(&c2
, 0, sizeof(c2
));
933 for (index
= 0; index
< MVPP22_CLS_C2_N_ENTRIES
; index
++) {
935 mvpp2_cls_c2_write(priv
, &c2
);
938 /* Disable the FIFO stages in C2 engine, which are only used in BIST
941 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_CTRL
,
942 MVPP22_CLS_C2_TCAM_BYPASS_FIFO
);
944 mvpp2_cls_port_init_flows(priv
);
947 void mvpp2_cls_port_config(struct mvpp2_port
*port
)
949 struct mvpp2_cls_lookup_entry le
;
952 /* Set way for the port */
953 val
= mvpp2_read(port
->priv
, MVPP2_CLS_PORT_WAY_REG
);
954 val
&= ~MVPP2_CLS_PORT_WAY_MASK(port
->id
);
955 mvpp2_write(port
->priv
, MVPP2_CLS_PORT_WAY_REG
, val
);
957 /* Pick the entry to be accessed in lookup ID decoding table
958 * according to the way and lkpid.
964 /* Set initial CPU queue for receiving packets */
965 le
.data
&= ~MVPP2_CLS_LKP_TBL_RXQ_MASK
;
966 le
.data
|= port
->first_rxq
;
968 /* Disable classification engines */
969 le
.data
&= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK
;
971 /* Update lookup ID table entry */
972 mvpp2_cls_lookup_write(port
->priv
, &le
);
974 mvpp2_port_c2_cls_init(port
);
977 u32
mvpp2_cls_c2_hit_count(struct mvpp2
*priv
, int c2_index
)
979 mvpp2_write(priv
, MVPP22_CLS_C2_TCAM_IDX
, c2_index
);
981 return mvpp2_read(priv
, MVPP22_CLS_C2_HIT_CTR
);
984 static void mvpp2_rss_port_c2_enable(struct mvpp2_port
*port
, u32 ctx
)
986 struct mvpp2_cls_c2_entry c2
;
989 mvpp2_cls_c2_read(port
->priv
, MVPP22_CLS_C2_RSS_ENTRY(port
->id
), &c2
);
991 /* The RxQ number is used to select the RSS table. It that case, we set
992 * it to be the ctx number.
994 qh
= (ctx
>> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK
;
995 ql
= ctx
& MVPP22_CLS_C2_ATTR0_QLOW_MASK
;
997 c2
.attr
[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh
) |
998 MVPP22_CLS_C2_ATTR0_QLOW(ql
);
1000 c2
.attr
[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN
;
1002 mvpp2_cls_c2_write(port
->priv
, &c2
);
1005 static void mvpp2_rss_port_c2_disable(struct mvpp2_port
*port
)
1007 struct mvpp2_cls_c2_entry c2
;
1010 mvpp2_cls_c2_read(port
->priv
, MVPP22_CLS_C2_RSS_ENTRY(port
->id
), &c2
);
1012 /* Reset the default destination RxQ to the port's first rx queue. */
1013 qh
= (port
->first_rxq
>> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK
;
1014 ql
= port
->first_rxq
& MVPP22_CLS_C2_ATTR0_QLOW_MASK
;
1016 c2
.attr
[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh
) |
1017 MVPP22_CLS_C2_ATTR0_QLOW(ql
);
1019 c2
.attr
[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN
;
1021 mvpp2_cls_c2_write(port
->priv
, &c2
);
1024 static inline int mvpp22_rss_ctx(struct mvpp2_port
*port
, int port_rss_ctx
)
1026 return port
->rss_ctx
[port_rss_ctx
];
1029 int mvpp22_port_rss_enable(struct mvpp2_port
*port
)
1031 if (mvpp22_rss_ctx(port
, 0) < 0)
1034 mvpp2_rss_port_c2_enable(port
, mvpp22_rss_ctx(port
, 0));
1039 int mvpp22_port_rss_disable(struct mvpp2_port
*port
)
1041 if (mvpp22_rss_ctx(port
, 0) < 0)
1044 mvpp2_rss_port_c2_disable(port
);
1049 static void mvpp22_port_c2_lookup_disable(struct mvpp2_port
*port
, int entry
)
1051 struct mvpp2_cls_c2_entry c2
;
1053 mvpp2_cls_c2_read(port
->priv
, entry
, &c2
);
1055 /* Clear the port map so that the entry doesn't match anymore */
1056 c2
.tcam
[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port
->id
)));
1058 mvpp2_cls_c2_write(port
->priv
, &c2
);
1061 /* Set CPU queue number for oversize packets */
1062 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port
*port
)
1066 mvpp2_write(port
->priv
, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port
->id
),
1067 port
->first_rxq
& MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK
);
1069 mvpp2_write(port
->priv
, MVPP2_CLS_SWFWD_P2HQ_REG(port
->id
),
1070 (port
->first_rxq
>> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS
));
1072 val
= mvpp2_read(port
->priv
, MVPP2_CLS_SWFWD_PCTRL_REG
);
1073 val
&= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port
->id
);
1074 mvpp2_write(port
->priv
, MVPP2_CLS_SWFWD_PCTRL_REG
, val
);
1077 static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port
*port
,
1078 struct mvpp2_rfs_rule
*rule
)
1080 struct flow_action_entry
*act
;
1081 struct mvpp2_cls_c2_entry c2
;
1085 if (!flow_action_basic_hw_stats_check(&rule
->flow
->action
, NULL
))
1088 memset(&c2
, 0, sizeof(c2
));
1090 index
= mvpp2_cls_c2_port_flow_index(port
, rule
->loc
);
1095 act
= &rule
->flow
->action
.entries
[0];
1097 rule
->c2_index
= c2
.index
;
1099 c2
.tcam
[3] = (rule
->c2_tcam
& 0xffff) |
1100 ((rule
->c2_tcam_mask
& 0xffff) << 16);
1101 c2
.tcam
[2] = ((rule
->c2_tcam
>> 16) & 0xffff) |
1102 (((rule
->c2_tcam_mask
>> 16) & 0xffff) << 16);
1103 c2
.tcam
[1] = ((rule
->c2_tcam
>> 32) & 0xffff) |
1104 (((rule
->c2_tcam_mask
>> 32) & 0xffff) << 16);
1105 c2
.tcam
[0] = ((rule
->c2_tcam
>> 48) & 0xffff) |
1106 (((rule
->c2_tcam_mask
>> 48) & 0xffff) << 16);
1108 pmap
= BIT(port
->id
);
1109 c2
.tcam
[4] = MVPP22_CLS_C2_PORT_ID(pmap
);
1110 c2
.tcam
[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap
));
1112 /* Match on Lookup Type */
1113 c2
.tcam
[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK
));
1114 c2
.tcam
[4] |= MVPP22_CLS_C2_LU_TYPE(rule
->loc
);
1116 if (act
->id
== FLOW_ACTION_DROP
) {
1117 c2
.act
= MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK
);
1119 /* We want to keep the default color derived from the Header
1120 * Parser drop entries, for VLAN and MAC filtering. This will
1121 * assign a default color of Green or Red, and we want matches
1122 * with a non-drop action to keep that color.
1124 c2
.act
= MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK
);
1126 /* Update RSS status after matching this entry */
1128 c2
.attr
[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN
;
1130 /* Always lock the RSS_EN decision. We might have high prio
1131 * rules steering to an RXQ, and a lower one steering to RSS,
1132 * we don't want the low prio RSS rule overwriting this flag.
1134 c2
.act
= MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK
);
1136 /* Mark packet as "forwarded to software", needed for RSS */
1137 c2
.act
|= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK
);
1139 c2
.act
|= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK
) |
1140 MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK
);
1142 if (act
->queue
.ctx
) {
1143 /* Get the global ctx number */
1144 ctx
= mvpp22_rss_ctx(port
, act
->queue
.ctx
);
1148 qh
= (ctx
>> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK
;
1149 ql
= ctx
& MVPP22_CLS_C2_ATTR0_QLOW_MASK
;
1151 qh
= ((act
->queue
.index
+ port
->first_rxq
) >> 3) &
1152 MVPP22_CLS_C2_ATTR0_QHIGH_MASK
;
1153 ql
= (act
->queue
.index
+ port
->first_rxq
) &
1154 MVPP22_CLS_C2_ATTR0_QLOW_MASK
;
1157 c2
.attr
[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh
) |
1158 MVPP22_CLS_C2_ATTR0_QLOW(ql
);
1163 mvpp2_cls_c2_write(port
->priv
, &c2
);
/* Install an RFS rule into the C2 engine. Thin per-engine wrapper kept
 * so the flow-table insertion path has a single entry point per engine.
 */
static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
                                         struct mvpp2_rfs_rule *rule)
{
        int err;

        err = mvpp2_port_c2_tcam_rule_add(port, rule);

        return err;
}
/* Remove an RFS rule from this port: detach the port from every flow
 * table entry associated with the rule's flow type, then disable the
 * rule's C2 lookup entry if one was installed.
 *
 * Returns 0 (also when the flow-id walk terminates early on a flow id
 * that has no descriptor).
 */
static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
                                          struct mvpp2_rfs_rule *rule)
{
        const struct mvpp2_cls_flow *flow;
        struct mvpp2_cls_flow_entry fe;
        int index, i;

        for_each_cls_flow_id_containing_type(i, rule->flow_type) {
                flow = mvpp2_cls_flow_get(i);
                if (!flow)
                        return 0;

                /* Per-port, per-location flow table entry for this flow */
                index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

                /* Read-modify-write: drop this port from the entry's port map */
                mvpp2_cls_flow_read(port->priv, index, &fe);
                mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
                mvpp2_cls_flow_write(port->priv, &fe);
        }

        /* c2_index < 0 means no C2 entry was ever installed for this rule */
        if (rule->c2_index >= 0)
                mvpp22_port_c2_lookup_disable(port, rule->c2_index);

        return 0;
}
/* Insert an RFS rule: program the C2 engine entry, then update every
 * flow table entry matching the rule's flow type so that lookups are
 * dispatched to the C2 engine with the rule's HEK fields and lookup
 * type.
 *
 * Returns 0 on success, -EOPNOTSUPP if the rule targets an engine
 * other than C2, or a negative error from the C2 insertion.
 */
static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
                                          struct mvpp2_rfs_rule *rule)
{
        const struct mvpp2_cls_flow *flow;
        struct mvpp2 *priv = port->priv;
        struct mvpp2_cls_flow_entry fe;
        int index, ret, i;

        /* Only the C2 engine is supported for RFS rules for now */
        if (rule->engine != MVPP22_CLS_ENGINE_C2)
                return -EOPNOTSUPP;

        ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
        if (ret)
                return ret;

        for_each_cls_flow_id_containing_type(i, rule->flow_type) {
                flow = mvpp2_cls_flow_get(i);
                if (!flow)
                        return 0;

                /* Skip flows that cannot extract all of the rule's HEK fields */
                if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
                        continue;

                index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);

                mvpp2_cls_flow_read(priv, index, &fe);
                mvpp2_cls_flow_eng_set(&fe, rule->engine);
                mvpp2_cls_flow_port_id_sel(&fe, true);
                mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
                mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
                /* 0xf: enable the entry for all ports */
                mvpp2_cls_flow_port_add(&fe, 0xf);

                mvpp2_cls_flow_write(priv, &fe);
        }

        return 0;
}
/* Translate the flow_rule match keys into the rule's 64-bit C2 TCAM
 * key/mask (rule->c2_tcam / rule->c2_tcam_mask) and record which HEK
 * fields are used (rule->hek_fields). 'offs' tracks the bit offset of
 * the next field inside the 64-bit Header Extracted Key.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported matches (vlan DEI,
 * or more fields than the HEK can carry).
 */
static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
{
        struct flow_rule *flow = rule->flow;
        int offs = 0;

        /* The order of insertion in C2 tcam must match the order in which
         * the fields are found in the header
         */
        if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(flow, &match);
                if (match.mask->vlan_id) {
                        rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;

                        rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
                        rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;

                        /* Don't update the offset yet */
                }

                if (match.mask->vlan_priority) {
                        rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;

                        /* VLAN pri is always at offset 13 relative to the
                         * current offset
                         */
                        rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
                                (offs + 13);
                        rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
                                (offs + 13);
                }

                /* Matching on the DEI bit is not supported by the hardware */
                if (match.mask->vlan_dei)
                        return -EOPNOTSUPP;

                /* vlan id and prio always seem to take a full 16-bit slot in
                 * the Header Extracted Key.
                 */
                offs += 16;
        }

        if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(flow, &match);
                if (match.mask->src) {
                        rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;

                        /* Ports are big-endian in the packet; convert to host
                         * order before placing them in the key.
                         */
                        rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
                        rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
                        offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
                }

                if (match.mask->dst) {
                        rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;

                        rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
                        rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
                        offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
                }
        }

        /* Reject rules needing more fields than the HEK can hold */
        if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
                return -EOPNOTSUPP;

        return 0;
}
/* Validate an RFS rule and build its hardware representation.  Only
 * single-action QUEUE/DROP rules are accepted, and only the C2 engine
 * is used for matching.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported actions or stats
 * types, -EINVAL when the match cannot be built.
 */
static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
{
        struct flow_rule *flow = rule->flow;
        struct flow_action_entry *act;

        if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
                return -EOPNOTSUPP;

        /* Only the first action of the rule is considered */
        act = &flow->action.entries[0];
        if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
                return -EOPNOTSUPP;

        /* When both an RSS context and an queue index are set, the index
         * is considered as an offset to be added to the indirection table
         * entries. We don't support this, so reject this rule.
         */
        if (act->queue.ctx && act->queue.index)
                return -EOPNOTSUPP;

        /* For now, only use the C2 engine which has a HEK size limited to 64
         * bits for TCAM matching.
         */
        rule->engine = MVPP22_CLS_ENGINE_C2;

        if (mvpp2_cls_c2_build_match(rule))
                return -EINVAL;

        return 0;
}
1336 int mvpp2_ethtool_cls_rule_get(struct mvpp2_port
*port
,
1337 struct ethtool_rxnfc
*rxnfc
)
1339 struct mvpp2_ethtool_fs
*efs
;
1341 if (rxnfc
->fs
.location
>= MVPP2_N_RFS_ENTRIES_PER_FLOW
)
1344 efs
= port
->rfs_rules
[rxnfc
->fs
.location
];
1348 memcpy(rxnfc
, &efs
->rxnfc
, sizeof(efs
->rxnfc
));
1353 int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port
*port
,
1354 struct ethtool_rxnfc
*info
)
1356 struct ethtool_rx_flow_spec_input input
= {};
1357 struct ethtool_rx_flow_rule
*ethtool_rule
;
1358 struct mvpp2_ethtool_fs
*efs
, *old_efs
;
1361 if (info
->fs
.location
>= MVPP2_N_RFS_ENTRIES_PER_FLOW
)
1364 efs
= kzalloc(sizeof(*efs
), GFP_KERNEL
);
1368 input
.fs
= &info
->fs
;
1370 /* We need to manually set the rss_ctx, since this info isn't present
1373 if (info
->fs
.flow_type
& FLOW_RSS
)
1374 input
.rss_ctx
= info
->rss_context
;
1376 ethtool_rule
= ethtool_rx_flow_rule_create(&input
);
1377 if (IS_ERR(ethtool_rule
)) {
1378 ret
= PTR_ERR(ethtool_rule
);
1382 efs
->rule
.flow
= ethtool_rule
->rule
;
1383 efs
->rule
.flow_type
= mvpp2_cls_ethtool_flow_to_type(info
->fs
.flow_type
);
1384 if (efs
->rule
.flow_type
< 0) {
1385 ret
= efs
->rule
.flow_type
;
1389 ret
= mvpp2_cls_rfs_parse_rule(&efs
->rule
);
1391 goto clean_eth_rule
;
1393 efs
->rule
.loc
= info
->fs
.location
;
1395 /* Replace an already existing rule */
1396 if (port
->rfs_rules
[efs
->rule
.loc
]) {
1397 old_efs
= port
->rfs_rules
[efs
->rule
.loc
];
1398 ret
= mvpp2_port_cls_rfs_rule_remove(port
, &old_efs
->rule
);
1400 goto clean_eth_rule
;
1402 port
->n_rfs_rules
--;
1405 ret
= mvpp2_port_flt_rfs_rule_insert(port
, &efs
->rule
);
1407 goto clean_eth_rule
;
1409 ethtool_rx_flow_rule_destroy(ethtool_rule
);
1410 efs
->rule
.flow
= NULL
;
1412 memcpy(&efs
->rxnfc
, info
, sizeof(*info
));
1413 port
->rfs_rules
[efs
->rule
.loc
] = efs
;
1414 port
->n_rfs_rules
++;
1419 ethtool_rx_flow_rule_destroy(ethtool_rule
);
/* ethtool: delete the classification rule at info->fs.location,
 * removing it from the hardware engines and freeing its bookkeeping.
 *
 * Returns 0 on success, -EINVAL for a bad or empty location, or a
 * negative error from the hardware removal.
 */
int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
                               struct ethtool_rxnfc *info)
{
        struct mvpp2_ethtool_fs *efs;
        int ret;

        if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
                return -EINVAL;

        efs = port->rfs_rules[info->fs.location];
        if (!efs)
                return -EINVAL;

        /* Remove the rule from the engines. */
        ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
        if (ret)
                return ret;

        port->n_rfs_rules--;
        port->rfs_rules[info->fs.location] = NULL;
        kfree(efs);

        return 0;
}
/* Map an RSS indirection-table entry (a logical queue number) to an
 * actual port RXQ, spreading queues across CPUs.
 *
 * NOTE(review): if port->nrxqs < num_possible_cpus(), nrxqs is 0 and
 * "rxq / nrxqs" divides by zero — callers presumably guarantee at
 * least one RXQ per possible CPU; confirm.
 */
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
        int nrxqs, cpu, cpus = num_possible_cpus();

        /* Number of RXQs per CPU */
        nrxqs = port->nrxqs / cpus;

        /* CPU that will handle this rx queue */
        cpu = rxq / nrxqs;

        /* Fall back to the port's first RXQ if the target CPU is offline */
        if (!cpu_online(cpu))
                return port->first_rxq;

        /* Indirection to better distribute the packets on the CPUs when
         * configuring the RSS queues.
         */
        return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}
1469 static void mvpp22_rss_fill_table(struct mvpp2_port
*port
,
1470 struct mvpp2_rss_table
*table
,
1473 struct mvpp2
*priv
= port
->priv
;
1476 for (i
= 0; i
< MVPP22_RSS_TABLE_ENTRIES
; i
++) {
1477 u32 sel
= MVPP22_RSS_INDEX_TABLE(rss_ctx
) |
1478 MVPP22_RSS_INDEX_TABLE_ENTRY(i
);
1479 mvpp2_write(priv
, MVPP22_RSS_INDEX
, sel
);
1481 mvpp2_write(priv
, MVPP22_RSS_TABLE_ENTRY
,
1482 mvpp22_rxfh_indir(port
, table
->indir
[i
]));
/* Allocate a free global RSS context (hardware RSS table), configure
 * its width and queue mapping, and return its number through *rss_ctx.
 *
 * Returns 0 on success, -EINVAL when all tables are in use, -ENOMEM on
 * allocation failure.
 */
static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
{
        struct mvpp2 *priv = port->priv;
        u32 ctx;

        /* Find the first free RSS table */
        for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
                if (!priv->rss_tables[ctx])
                        break;
        }

        if (ctx == MVPP22_N_RSS_TABLES)
                return -EINVAL;

        priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
                                        GFP_KERNEL);
        if (!priv->rss_tables[ctx])
                return -ENOMEM;

        *rss_ctx = ctx;

        /* Set the table width: replace the whole classifier Rx queue number
         * with the ones configured in RSS table entries.
         */
        mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
        mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

        /* Point the context's queue lookup at its own table */
        mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
        mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));

        return 0;
}
/* Create a new RSS context for this port: allocate a global RSS
 * context, then bind it to the first free per-port context slot
 * (returned through *port_ctx).
 *
 * Returns 0 on success, -EINVAL when no per-port slot is free, or a
 * negative error from the global context creation.
 *
 * NOTE(review): when no per-port slot is free, the freshly created
 * global context is not released — looks like a leak; confirm.
 */
int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
{
        u32 rss_ctx;
        int ret, i;

        ret = mvpp22_rss_context_create(port, &rss_ctx);
        if (ret)
                return ret;

        /* Find the first available context number in the port, starting from 1.
         * Context 0 on each port is reserved for the default context.
         */
        for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
                if (port->rss_ctx[i] < 0)
                        break;
        }

        if (i == MVPP22_N_RSS_TABLES)
                return -EINVAL;

        port->rss_ctx[i] = rss_ctx;
        *port_ctx = i;

        return 0;
}
1545 static struct mvpp2_rss_table
*mvpp22_rss_table_get(struct mvpp2
*priv
,
1548 if (rss_ctx
< 0 || rss_ctx
>= MVPP22_N_RSS_TABLES
)
1551 return priv
->rss_tables
[rss_ctx
];
/* Delete a per-port RSS context: first tear down every classification
 * rule that steers to it (best effort — failures are only warned
 * about), then free the global RSS table and clear both mappings.
 *
 * Returns 0 on success, -EINVAL for an invalid context number.
 */
int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
{
        struct mvpp2 *priv = port->priv;
        struct ethtool_rxnfc *rxnfc;
        int i, rss_ctx, ret;

        /* Translate the per-port context number to the global one */
        rss_ctx = mvpp22_rss_ctx(port, port_ctx);

        if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
                return -EINVAL;

        /* Invalidate any active classification rule that use this context */
        for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
                if (!port->rfs_rules[i])
                        continue;

                rxnfc = &port->rfs_rules[i]->rxnfc;
                if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
                    rxnfc->rss_context != port_ctx)
                        continue;

                /* Best effort: keep going even if one rule fails to delete */
                ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
                if (ret) {
                        netdev_warn(port->dev,
                                    "couldn't remove classification rule %d associated to this context",
                                    rxnfc->fs.location);
                }
        }

        kfree(priv->rss_tables[rss_ctx]);

        priv->rss_tables[rss_ctx] = NULL;
        /* -1 marks the per-port slot as free */
        port->rss_ctx[port_ctx] = -1;

        return 0;
}
/* Set the RSS indirection table of a per-port context: copy the new
 * entries into the software table, then program them into hardware.
 *
 * Returns 0 on success, -EINVAL for an invalid context.
 * Assumes 'indir' holds MVPP22_RSS_TABLE_ENTRIES entries.
 */
int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
                                  const u32 *indir)
{
        int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
        struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
                                                                 rss_ctx);

        if (!rss_table)
                return -EINVAL;

        memcpy(rss_table->indir, indir,
               MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

        /* Push the updated table to the hardware */
        mvpp22_rss_fill_table(port, rss_table, rss_ctx);

        return 0;
}
/* Read back the software copy of a per-port RSS context's indirection
 * table into 'indir' (MVPP22_RSS_TABLE_ENTRIES entries).
 *
 * Returns 0 on success, -EINVAL for an invalid context.
 */
int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
                                  u32 *indir)
{
        int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
        struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
                                                                 rss_ctx);

        if (!rss_table)
                return -EINVAL;

        memcpy(indir, rss_table->indir,
               MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));

        return 0;
}
/* ethtool -N rx-flow-hash: translate the RXH_* bits in info->data into
 * HEK field options and program them for the given flow type.
 *
 * Returns 0 on success, -EOPNOTSUPP for unsupported flow types, or a
 * negative error from the hash-options update.
 */
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
        u16 hash_opts = 0;
        u32 flow_type;

        flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

        switch (flow_type) {
        case MVPP22_FLOW_TCP4:
        case MVPP22_FLOW_UDP4:
        case MVPP22_FLOW_TCP6:
        case MVPP22_FLOW_UDP6:
                if (info->data & RXH_L4_B_0_1)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
                if (info->data & RXH_L4_B_2_3)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
                /* L4 flows also hash on the L2/L3 fields below */
                fallthrough;
        case MVPP22_FLOW_IP4:
        case MVPP22_FLOW_IP6:
                if (info->data & RXH_L2DA)
                        hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
                if (info->data & RXH_VLAN)
                        hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
                if (info->data & RXH_L3_PROTO)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
                /* IP source/destination options cover both v4 and v6 */
                if (info->data & RXH_IP_SRC)
                        hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
                                      MVPP22_CLS_HEK_OPT_IP6SA);
                if (info->data & RXH_IP_DST)
                        hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
                                      MVPP22_CLS_HEK_OPT_IP6DA);
                break;
        default: return -EOPNOTSUPP;
        }

        return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
/* ethtool -n rx-flow-hash: report which header fields are hashed for
 * the given flow type, translating the configured HEK field options
 * back into RXH_* bits in info->data.
 *
 * Returns 0 on success, -EINVAL when an unexpected HEK option is set.
 */
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
        unsigned long hash_opts;
        u32 flow_type;
        int i;

        flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);

        hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
        info->data = 0;

        /* Translate each configured HEK option bit into its RXH_* flag */
        for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
                switch (BIT(i)) {
                case MVPP22_CLS_HEK_OPT_MAC_DA:
                        info->data |= RXH_L2DA;
                        break;
                case MVPP22_CLS_HEK_OPT_VLAN:
                        info->data |= RXH_VLAN;
                        break;
                case MVPP22_CLS_HEK_OPT_L3_PROTO:
                        info->data |= RXH_L3_PROTO;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4SA:
                case MVPP22_CLS_HEK_OPT_IP6SA:
                        info->data |= RXH_IP_SRC;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4DA:
                case MVPP22_CLS_HEK_OPT_IP6DA:
                        info->data |= RXH_IP_DST;
                        break;
                case MVPP22_CLS_HEK_OPT_L4SIP:
                        info->data |= RXH_L4_B_0_1;
                        break;
                case MVPP22_CLS_HEK_OPT_L4DIP:
                        info->data |= RXH_L4_B_2_3;
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}
1706 int mvpp22_port_rss_init(struct mvpp2_port
*port
)
1708 struct mvpp2_rss_table
*table
;
1712 for (i
= 0; i
< MVPP22_N_RSS_TABLES
; i
++)
1713 port
->rss_ctx
[i
] = -1;
1715 ret
= mvpp22_rss_context_create(port
, &context
);
1719 table
= mvpp22_rss_table_get(port
->priv
, context
);
1723 port
->rss_ctx
[0] = context
;
1725 /* Configure the first table to evenly distribute the packets across
1726 * real Rx Queues. The table entries map a hash to a port Rx Queue.
1728 for (i
= 0; i
< MVPP22_RSS_TABLE_ENTRIES
; i
++)
1729 table
->indir
[i
] = ethtool_rxfh_indir_default(i
, port
->nrxqs
);
1731 mvpp22_rss_fill_table(port
, table
, mvpp22_rss_ctx(port
, 0));
1733 /* Configure default flows */
1734 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_IP4
, MVPP22_CLS_HEK_IP4_2T
);
1735 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_IP6
, MVPP22_CLS_HEK_IP6_2T
);
1736 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_TCP4
, MVPP22_CLS_HEK_IP4_5T
);
1737 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_TCP6
, MVPP22_CLS_HEK_IP6_5T
);
1738 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_UDP4
, MVPP22_CLS_HEK_IP4_5T
);
1739 mvpp2_port_rss_hash_opts_set(port
, MVPP22_FLOW_UDP6
, MVPP22_CLS_HEK_IP6_5T
);