// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
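
/* Example rules this file offloads (illustrative only; the interface
 * names and rates below are placeholders, not part of this driver):
 *
 *	tc filter add dev eth0 egress matchall skip_sw \
 *		action police rate 1Gbit burst 8Kbit
 *	tc filter add dev eth0 ingress matchall skip_sw \
 *		action mirred egress mirror dev eth1
 */

/* Validate an egress matchall rule: it must carry exactly one
 * policing action whose rate does not exceed the link speed, and no
 * TX queue may already be bound to an incompatible scheduling class.
 */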
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to different class");
			return -EBUSY;
		}
	}

	return 0;
}
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}
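
/* Allocate a channel rate-limiting (CH_RL) traffic class matching the
 * policing rate and bind all of the port's TX queue sets to it.
 */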
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}
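
/* Tear down the egress offload: unbind the TX queue sets, free the
 * scheduling class and reset the egress matchall state.
 */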
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}
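
/* If the rule contains a mirred action, allocate a mirror virtual
 * interface so ingress traffic can be duplicated to mirror queues.
 */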
static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	flow_action_for_each(i, act, &cls->rule->action) {
		if (act->id == FLOW_ACTION_MIRRED) {
			ret = cxgb4_port_mirror_alloc(dev);
			if (ret) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Couldn't allocate mirror");
				return ret;
			}

			tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
			break;
		}
	}

	return 0;
}

static void cxgb4_matchall_mirror_free(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (!tc_port_matchall->ingress.viid_mirror)
		return;

	cxgb4_port_mirror_free(dev);
	tc_port_matchall->ingress.viid_mirror = 0;
}
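
/* Delete the hardware filter installed for @filter_type (IPv4 or
 * IPv6) and clear its cached TID.
 */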
static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
			       &tc_port_matchall->ingress.fs[filter_type]);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = 0;
	return 0;
}
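
/* Build and install an LE-TCAM filter matching all traffic received
 * on this port's PF/VF for the given filter type (IPv4 or IPv6), at
 * the rule's priority.
 */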
static int cxgb4_matchall_add_filter(struct net_device *dev,
				     struct tc_cls_matchall_offload *cls,
				     u8 filter_type)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
				   false, cls->common.prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs[filter_type];
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->type = filter_type;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid[filter_type] = fidx;
	return 0;
}
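
/* Install the ingress matchall rule: allocate a mirror if the rule
 * asks for one, then add one filter per supported filter type,
 * unwinding on failure.
 */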
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret, i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_matchall_mirror_alloc(dev, cls);
	if (ret)
		return ret;

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_add_filter(dev, cls, i);
		if (ret)
			goto out_free;
	}

	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	while (i-- > 0)
		cxgb4_matchall_del_filter(dev, i);

	cxgb4_matchall_mirror_free(dev);
	return ret;
}

static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_matchall_del_filter(dev, i);
		if (ret)
			return ret;
	}

	cxgb4_matchall_mirror_free(dev);

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}
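
/* Offload a matchall rule. Only one ingress and one egress matchall
 * rule can be offloaded per port at a time.
 */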
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action,
						  extack, 1);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		/* All the filter types of this matchall rule save the
		 * same cookie. So, checking for the first one is
		 * enough.
		 */
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs[0].tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}
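
/* Report accumulated hardware hit counters for the ingress matchall
 * rule, summed across the per-filter-type filters.
 */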
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_matchall_ingress_entry *ingress;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;
	u8 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ingress = &tc_port_matchall->ingress;
	for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
		ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
						&tmp_packets, &tmp_bytes,
						ingress->fs[i].hash);
		if (ret)
			return ret;

		packets += tmp_packets;
		bytes += tmp_bytes;
	}

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  0, tc_port_matchall->ingress.last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}
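
/* Allocate the per-adapter matchall state, one entry per port. */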
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}