// SPDX-License-Identifier: GPL-2.0
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include "mvpp2.h"
#include "mvpp2_cls.h"
#include "mvpp2_prs.h"

#define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask)        \
{                                                               \
        .flow_type = _type,                                     \
        .flow_id = _id,                                         \
        .supported_hash_opts = _opts,                           \
        .prs_ri = {                                             \
                .ri = _ri,                                      \
                .ri_mask = _ri_mask                             \
        }                                                       \
}

static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
        /* TCP over IPv4 flows, Not fragmented, no vlan tag */
        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* TCP over IPv4 flows, Not fragmented, with vlan tag */
        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        /* TCP over IPv4 flows, fragmented, no vlan tag */
        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* TCP over IPv4 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        /* UDP over IPv4 flows, Not fragmented, no vlan tag */
        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP4_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* UDP over IPv4 flows, Not fragmented, with vlan tag */
        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
                       MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        /* UDP over IPv4 flows, fragmented, no vlan tag */
        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* UDP over IPv4 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        /* TCP over IPv6 flows, not fragmented, no vlan tag */
        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP6_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP6_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* TCP over IPv6 flows, not fragmented, with vlan tag */
        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
                       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
                       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        /* TCP over IPv6 flows, fragmented, no vlan tag */
        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
                       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
                       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* TCP over IPv6 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
                       MVPP2_PRS_RI_L4_TCP,
                       MVPP2_PRS_IP_MASK),

        /* UDP over IPv6 flows, not fragmented, no vlan tag */
        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP6_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
                       MVPP22_CLS_HEK_IP6_5T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* UDP over IPv6 flows, not fragmented, with vlan tag */
        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
                       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
                       MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        /* UDP over IPv6 flows, fragmented, no vlan tag */
        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
                       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
                       MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),

        /* UDP over IPv6 flows, fragmented, with vlan tag */
        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
                       MVPP2_PRS_RI_L4_UDP,
                       MVPP2_PRS_IP_MASK),

        /* IPv4 flows, no vlan tag */
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
                       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
                       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
                       MVPP22_CLS_HEK_IP4_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
                       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

        /* IPv4 flows, with vlan tag */
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4,
                       MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OPT,
                       MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
                       MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP4_OTHER,
                       MVPP2_PRS_RI_L3_PROTO_MASK),

        /* IPv6 flows, no vlan tag */
        MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
                       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
                       MVPP22_CLS_HEK_IP6_2T,
                       MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT,
                       MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),

        /* IPv6 flows, with vlan tag */
        MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6,
                       MVPP2_PRS_RI_L3_PROTO_MASK),
        MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
                       MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
                       MVPP2_PRS_RI_L3_IP6_EXT,
                       MVPP2_PRS_RI_L3_PROTO_MASK),

        /* Non IP flow, no vlan tag */
        MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
                       0,
                       MVPP2_PRS_RI_VLAN_NONE,
                       MVPP2_PRS_RI_VLAN_MASK),
        /* Non IP flow, with vlan tag */
        MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
                       MVPP22_CLS_HEK_OPT_VLAN,
                       0, 0),
};

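/* Read the hit counter of a flow table entry */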
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index)
{
        mvpp2_write(priv, MVPP2_CTRS_IDX, index);

        return mvpp2_read(priv, MVPP2_CLS_FLOW_TBL_HIT_CTR);
}

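/* Read a flow table entry from the hardware */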
void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
                         struct mvpp2_cls_flow_entry *fe)
{
        fe->index = index;
        mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
        fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
        fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
        fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
}

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
                                 struct mvpp2_cls_flow_entry *fe)
{
        mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
        mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

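/* Read the hit counter of a lookup ID decoding table entry */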
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
{
        mvpp2_write(priv, MVPP2_CTRS_IDX, index);

        return mvpp2_read(priv, MVPP2_CLS_DEC_TBL_HIT_CTR);
}

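/* Read a classification lookup table entry, selected by its lkpid and way */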
void mvpp2_cls_lookup_read(struct mvpp2 *priv, int lkpid, int way,
                           struct mvpp2_cls_lookup_entry *le)
{
        u32 val;

        val = (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
        mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
        le->way = way;
        le->lkpid = lkpid;
        le->data = mvpp2_read(priv, MVPP2_CLS_LKP_TBL_REG);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
                                   struct mvpp2_cls_lookup_entry *le)
{
        u32 val;

        val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
        mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
        mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Operations on flow entry */
static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
{
        return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
}

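/* Set the number of fields used in the Header Extracted Key */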
static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
                                       int num_of_fields)
{
        fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
        fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
}

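/* Get the id of the HEK field at the given index */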
static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
                                  int field_index)
{
        return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
                MVPP2_CLS_FLOW_TBL2_FLD_MASK;
}

static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
                                   int field_index, int field_id)
{
        fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
                                                MVPP2_CLS_FLOW_TBL2_FLD_MASK);
        fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
}

static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
                                   int engine)
{
        fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
        fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
}

int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe)
{
        return (fe->data[0] >> MVPP2_CLS_FLOW_TBL0_OFFS) &
                MVPP2_CLS_FLOW_TBL0_ENG_MASK;
}

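/* Select the source of the Port ID used to match this entry. When from_packet
 * is true, the Port ID is presumably taken from the received packet rather
 * than from the flow table entry itself.
 */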
static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
                                       bool from_packet)
{
        if (from_packet)
                fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
        else
                fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}

static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
{
        fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
        fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
}

static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
                                    bool is_last)
{
        fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
        fe->data[0] |= !!is_last;
}

static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
{
        fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
        fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
}

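/* Add a port to the bitmap of ports matching this flow entry */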
static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
                                    u32 port)
{
        fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}

/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
                                    struct mvpp2_cls_flow *flow)
{
        mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
                           flow->prs_ri.ri_mask);
}

/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
                                    struct mvpp2_cls_flow *flow)
{
        struct mvpp2_cls_lookup_entry le;

        le.way = 0;
        le.lkpid = flow->flow_id;

        /* The default RxQ for this port is set in the C2 lookup */
        le.data = 0;

        /* We point to the first lookup in the sequence for the flow, that is
         * the C2 lookup.
         */
        le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));

        /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
        le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

        mvpp2_cls_lookup_write(priv, &le);
}

/* Initialize the flow table entries for the given flow */
static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
{
        struct mvpp2_cls_flow_entry fe;
        int i;

        /* C2 lookup */
        memset(&fe, 0, sizeof(fe));
        fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);

        mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
        mvpp2_cls_flow_port_id_sel(&fe, true);
        mvpp2_cls_flow_last_set(&fe, 0);
        mvpp2_cls_flow_pri_set(&fe, 0);
        mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);

        /* Add all ports */
        for (i = 0; i < MVPP2_MAX_PORTS; i++)
                mvpp2_cls_flow_port_add(&fe, BIT(i));

        mvpp2_cls_flow_write(priv, &fe);

        /* C3Hx lookups */
        for (i = 0; i < MVPP2_MAX_PORTS; i++) {
                memset(&fe, 0, sizeof(fe));
                fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);

                mvpp2_cls_flow_port_id_sel(&fe, true);
                mvpp2_cls_flow_pri_set(&fe, i + 1);
                mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
                mvpp2_cls_flow_port_add(&fe, BIT(i));

                mvpp2_cls_flow_write(priv, &fe);
        }

        /* Update the last entry */
        mvpp2_cls_flow_last_set(&fe, 1);
        mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);

        mvpp2_cls_flow_write(priv, &fe);
}

/* Adds a field to the Header Extracted Key generation parameters */
static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
                                    u32 field_id)
{
        int nb_fields = mvpp2_cls_flow_hek_num_get(fe);

        if (nb_fields == MVPP2_FLOW_N_FIELDS)
                return -EINVAL;

        mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);

        mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);

        return 0;
}

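/* Build the HEK field list of a flow entry from a bitmask of hash options */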
static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
                                     unsigned long hash_opts)
{
        u32 field_id;
        int i;

        /* Clear old fields */
        mvpp2_cls_flow_hek_num_set(fe, 0);
        fe->data[2] = 0;

        for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
                switch (BIT(i)) {
                case MVPP22_CLS_HEK_OPT_VLAN:
                        field_id = MVPP22_CLS_FIELD_VLAN;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4SA:
                        field_id = MVPP22_CLS_FIELD_IP4SA;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4DA:
                        field_id = MVPP22_CLS_FIELD_IP4DA;
                        break;
                case MVPP22_CLS_HEK_OPT_IP6SA:
                        field_id = MVPP22_CLS_FIELD_IP6SA;
                        break;
                case MVPP22_CLS_HEK_OPT_IP6DA:
                        field_id = MVPP22_CLS_FIELD_IP6DA;
                        break;
                case MVPP22_CLS_HEK_OPT_L4SIP:
                        field_id = MVPP22_CLS_FIELD_L4SIP;
                        break;
                case MVPP22_CLS_HEK_OPT_L4DIP:
                        field_id = MVPP22_CLS_FIELD_L4DIP;
                        break;
                default:
                        return -EINVAL;
                }
                if (mvpp2_flow_add_hek_field(fe, field_id))
                        return -EINVAL;
        }

        return 0;
}

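/* Get the flow description for a given flow index, or NULL if out of range */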
struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
        if (flow >= MVPP2_N_FLOWS)
                return NULL;

        return &cls_flows[flow];
}

/* Set the hash generation options for the given traffic flow.
 * One traffic flow (in the ethtool sense) has multiple classification flows,
 * to handle specific cases such as fragmentation, or the presence of a
 * VLAN tag.
 *
 * Each of these individual flows has different constraints; for example we
 * can't hash fragmented packets on L4 data (else we would risk packet
 * re-ordering), so each classification flow masks the requested options
 * with its supported ones.
 */
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
                                        u16 requested_opts)
{
        struct mvpp2_cls_flow_entry fe;
        struct mvpp2_cls_flow *flow;
        int i, engine, flow_index;
        u16 hash_opts;

        for (i = 0; i < MVPP2_N_FLOWS; i++) {
                flow = mvpp2_cls_flow_get(i);
                if (!flow)
                        return -EINVAL;

                if (flow->flow_type != flow_type)
                        continue;

                flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
                                                        flow->flow_id);

                mvpp2_cls_flow_read(port->priv, flow_index, &fe);

                hash_opts = flow->supported_hash_opts & requested_opts;

                /* Use the C3HB engine to access L4 info. This adds the L4
                 * fields to the hash parameters.
                 */
                if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
                        engine = MVPP22_CLS_ENGINE_C3HB;
                else
                        engine = MVPP22_CLS_ENGINE_C3HA;

                if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
                        return -EINVAL;

                mvpp2_cls_flow_eng_set(&fe, engine);

                mvpp2_cls_flow_write(port->priv, &fe);
        }

        return 0;
}

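/* Translate the HEK fields of a flow entry back into a bitmask of
 * MVPP22_CLS_HEK_OPT_* hash options.
 */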
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
{
        u16 hash_opts = 0;
        int n_fields, i, field;

        n_fields = mvpp2_cls_flow_hek_num_get(fe);

        for (i = 0; i < n_fields; i++) {
                field = mvpp2_cls_flow_hek_get(fe, i);

                switch (field) {
                case MVPP22_CLS_FIELD_MAC_DA:
                        hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
                        break;
                case MVPP22_CLS_FIELD_VLAN:
                        hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
                        break;
                case MVPP22_CLS_FIELD_L3_PROTO:
                        hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
                        break;
                case MVPP22_CLS_FIELD_IP4SA:
                        hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
                        break;
                case MVPP22_CLS_FIELD_IP4DA:
                        hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
                        break;
                case MVPP22_CLS_FIELD_IP6SA:
                        hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
                        break;
                case MVPP22_CLS_FIELD_IP6DA:
                        hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
                        break;
                case MVPP22_CLS_FIELD_L4SIP:
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
                        break;
                case MVPP22_CLS_FIELD_L4DIP:
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
                        break;
                default:
                        break;
                }
        }
        return hash_opts;
}

/* Returns the hash opts for this flow. There are several classifier flows
 * for one traffic flow; this returns an aggregation of all their
 * configurations.
 */
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
        struct mvpp2_cls_flow_entry fe;
        struct mvpp2_cls_flow *flow;
        int i, flow_index;
        u16 hash_opts = 0;

        for (i = 0; i < MVPP2_N_FLOWS; i++) {
                flow = mvpp2_cls_flow_get(i);
                if (!flow)
                        return 0;

                if (flow->flow_type != flow_type)
                        continue;

                flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
                                                        flow->flow_id);

                mvpp2_cls_flow_read(port->priv, flow_index, &fe);

                hash_opts |= mvpp2_flow_get_hek_fields(&fe);
        }

        return hash_opts;
}

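/* Initialize the parser, lookup and flow table entries for all known flows */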
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
        struct mvpp2_cls_flow *flow;
        int i;

        for (i = 0; i < MVPP2_N_FLOWS; i++) {
                flow = mvpp2_cls_flow_get(i);
                if (!flow)
                        break;

                mvpp2_cls_flow_prs_init(priv, flow);
                mvpp2_cls_flow_lkp_init(priv, flow);
                mvpp2_cls_flow_init(priv, flow);
        }
}

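/* Write a C2 entry (TCAM data, action and attributes) to the hardware */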
static void mvpp2_cls_c2_write(struct mvpp2 *priv,
                               struct mvpp2_cls_c2_entry *c2)
{
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);

        /* Write TCAM */
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);

        mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);

        mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
        mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
        mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
        mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
}

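/* Read a C2 entry back from the hardware */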
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
                       struct mvpp2_cls_c2_entry *c2)
{
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);

        c2->index = index;

        c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
        c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
        c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
        c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
        c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);

        c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);

        c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
        c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
        c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
        c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
}

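/* Set up the per-port C2 entry: match packets coming from this port, enable
 * RSS, mark them as forwarded to software and set their default rx queue.
 */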
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
        struct mvpp2_cls_c2_entry c2;
        u8 qh, ql, pmap;

        memset(&c2, 0, sizeof(c2));

        c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);

        pmap = BIT(port->id);
        c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
        c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));

        /* Update RSS status after matching this entry */
        c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);

        /* Mark packet as "forwarded to software", needed for RSS */
        c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);

        /* Configure the default rx queue: update Queue Low and Queue High,
         * but don't lock, since the rx queue selection might be overridden
         * by RSS.
         */
        c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
                  MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);

        qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
        ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;

        c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
                     MVPP22_CLS_C2_ATTR0_QLOW(ql);

        mvpp2_cls_c2_write(port->priv, &c2);
}

/* Classifier default initialization */
void mvpp2_cls_init(struct mvpp2 *priv)
{
        struct mvpp2_cls_lookup_entry le;
        struct mvpp2_cls_flow_entry fe;
        int index;

        /* Enable classifier */
        mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

        /* Clear classifier flow table */
        memset(&fe.data, 0, sizeof(fe.data));
        for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
                fe.index = index;
                mvpp2_cls_flow_write(priv, &fe);
        }

        /* Clear classifier lookup table */
        le.data = 0;
        for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
                le.lkpid = index;
                le.way = 0;
                mvpp2_cls_lookup_write(priv, &le);

                le.way = 1;
                mvpp2_cls_lookup_write(priv, &le);
        }

        mvpp2_cls_port_init_flows(priv);
}

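/* Configure the classifier for the given port: set the lookup way, program
 * the default lookup entry and install the C2 entry used for RSS.
 */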
void mvpp2_cls_port_config(struct mvpp2_port *port)
{
        struct mvpp2_cls_lookup_entry le;
        u32 val;

        /* Set way for the port */
        val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
        val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
        mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

        /* Pick the entry to be accessed in lookup ID decoding table
         * according to the way and lkpid.
         */
        le.lkpid = port->id;
        le.way = 0;
        le.data = 0;

        /* Set initial CPU queue for receiving packets */
        le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
        le.data |= port->first_rxq;

        /* Disable classification engines */
        le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

        /* Update lookup ID table entry */
        mvpp2_cls_lookup_write(port->priv, &le);

        mvpp2_port_c2_cls_init(port);
}

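/* Read the hit counter of the given C2 entry */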
u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index)
{
        mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2_index);

        return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
}

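/* Set the RSS enable attribute in the per-port C2 entry */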
static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
{
        struct mvpp2_cls_c2_entry c2;

        mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

        c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;

        mvpp2_cls_c2_write(port->priv, &c2);
}

static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
{
        struct mvpp2_cls_c2_entry c2;

        mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);

        c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;

        mvpp2_cls_c2_write(port->priv, &c2);
}

void mvpp22_rss_enable(struct mvpp2_port *port)
{
        mvpp2_rss_port_c2_enable(port);
}

void mvpp22_rss_disable(struct mvpp2_port *port)
{
        mvpp2_rss_port_c2_disable(port);
}

/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
        u32 val;

        mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
                    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
                    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

        val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
        val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
        mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

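/* Translate an RSS indirection table entry into a physical rx queue id for
 * the given port.
 */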
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
        int nrxqs, cpu, cpus = num_possible_cpus();

        /* Number of RXQs per CPU */
        nrxqs = port->nrxqs / cpus;

        /* CPU that will handle this rx queue */
        cpu = rxq / nrxqs;

        if (!cpu_online(cpu))
                return port->first_rxq;

        /* Indirection to better distribute the packets on the CPUs when
         * configuring the RSS queues.
         */
        return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
}

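/* Program one of the hardware RSS tables from the port's indirection
 * configuration.
 */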
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
{
        struct mvpp2 *priv = port->priv;
        int i;

        for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
                u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
                          MVPP22_RSS_INDEX_TABLE_ENTRY(i);
                mvpp2_write(priv, MVPP22_RSS_INDEX, sel);

                mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
                            mvpp22_rxfh_indir(port, port->indir[i]));
        }
}

int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
        u16 hash_opts = 0;

        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
        case TCP_V6_FLOW:
        case UDP_V6_FLOW:
                if (info->data & RXH_L4_B_0_1)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
                if (info->data & RXH_L4_B_2_3)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
                /* Fallthrough */
        case IPV4_FLOW:
        case IPV6_FLOW:
                if (info->data & RXH_L2DA)
                        hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
                if (info->data & RXH_VLAN)
                        hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
                if (info->data & RXH_L3_PROTO)
                        hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
                if (info->data & RXH_IP_SRC)
                        hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
                                      MVPP22_CLS_HEK_OPT_IP6SA);
                if (info->data & RXH_IP_DST)
                        hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
                                      MVPP22_CLS_HEK_OPT_IP6DA);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
}

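/* ethtool rxnfc get helper: report the configured hash fields as RXH_* flags */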
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
        unsigned long hash_opts;
        int i;

        hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
        info->data = 0;

        for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
                switch (BIT(i)) {
                case MVPP22_CLS_HEK_OPT_MAC_DA:
                        info->data |= RXH_L2DA;
                        break;
                case MVPP22_CLS_HEK_OPT_VLAN:
                        info->data |= RXH_VLAN;
                        break;
                case MVPP22_CLS_HEK_OPT_L3_PROTO:
                        info->data |= RXH_L3_PROTO;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4SA:
                case MVPP22_CLS_HEK_OPT_IP6SA:
                        info->data |= RXH_IP_SRC;
                        break;
                case MVPP22_CLS_HEK_OPT_IP4DA:
                case MVPP22_CLS_HEK_OPT_IP6DA:
                        info->data |= RXH_IP_DST;
                        break;
                case MVPP22_CLS_HEK_OPT_L4SIP:
                        info->data |= RXH_L4_B_0_1;
                        break;
                case MVPP22_CLS_HEK_OPT_L4DIP:
                        info->data |= RXH_L4_B_2_3;
                        break;
                default:
                        return -EINVAL;
                }
        }
        return 0;
}

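/* Initialize RSS for the given port: set the table width and the per-port
 * table selection, fill a default indirection table and configure default
 * hash options for each flow type.
 */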
void mvpp22_rss_port_init(struct mvpp2_port *port)
{
        struct mvpp2 *priv = port->priv;
        int i;

        /* Set the table width: replace the whole classifier Rx queue number
         * with the ones configured in RSS table entries.
         */
        mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
        mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);

        /* The default RxQ is used as a key to select the RSS table to use.
         * We use one RSS table per port.
         */
        mvpp2_write(priv, MVPP22_RSS_INDEX,
                    MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
        mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
                    MVPP22_RSS_TABLE_POINTER(port->id));

        /* Configure the first table to evenly distribute the packets across
         * real Rx Queues. The table entries map a hash to a port Rx Queue.
         */
        for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
                port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);

        mvpp22_rss_fill_table(port, port->id);

        /* Configure default flows */
        mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
        mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
        mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
        mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
        mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
        mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
}