// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

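/* TC-MATCHALL hardware offload. An ingress matchall rule is backed by a
 * wildcard LETCAM filter matching all traffic arriving on this port's
 * virtual interface; an egress matchall police action is backed by a
 * channel rate-limiting (CH_RL) scheduling class that all of the port's
 * TX queue sets are bound to.
 */
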
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to different class");
			return -EBUSY;
		}
	}

	return 0;
}

static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}

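/* Release every queue set on the port from its scheduling class */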
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}

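/* Allocate a channel rate-limiting class for the egress police action.
 * The policer rate arrives in bytes/sec while the scheduler takes Kbps,
 * so e.g. a 1 Gbit/s policer (rate_bytes_ps == 125000000) becomes
 * maxrate == 125000000 * 8 / 1000 == 1000000 Kbps.
 */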
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}

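/* Tear down the egress offload: unbind the port's queues, free the
 * traffic class, and mark the egress state disabled again.
 */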
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}

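/* Install the ingress matchall rule as a wildcard LETCAM filter. The TC
 * rule priority maps directly to the TCAM index so that rule precedence
 * in software and hardware stay in sync.
 */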
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Note that TC uses prio 0 to indicate stack to generate
	 * automatic prio and hence doesn't pass prio 0 to driver.
	 * However, the hardware TCAM index starts from 0. Hence, the
	 * -1 here. 1 slot is enough to create a wildcard matchall
	 * VIID rule.
	 */
	if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
		fidx = cls->common.prio - 1;
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	/* Only insert MATCHALL rule if its priority doesn't conflict
	 * with existing rules in the LETCAM.
	 */
	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs;
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid = fidx;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}

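/* Delete the ingress filter and reset the cached ingress state */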
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
			       &tc_port_matchall->ingress.fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.tid = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}

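/* Entry point for adding/replacing a matchall offload. A typical
 * configuration (illustrative only, not taken from this file) would be:
 *
 *   tc qdisc add dev <iface> clsact
 *   tc filter add dev <iface> egress matchall skip_sw \
 *      action police rate 1Gbit burst 8Kbit
 *
 * Only one offloaded matchall rule per direction is supported per port.
 */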
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}

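/* Remove an offloaded matchall rule; the cookie identifies which
 * installed rule (ingress filter or egress class) the request targets.
 */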
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs.tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}

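/* Report hit counters for the ingress rule. Hardware counters are
 * cumulative, so only the delta since the last poll is pushed to the
 * TC core via flow_stats_update().
 */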
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u64 packets, bytes;
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
					&packets, &bytes,
					tc_port_matchall->ingress.fs.hash);
	if (ret)
		return ret;

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  tc_port_matchall->ingress.last_used);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}

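/* Force-disable both directions on a port; used during adapter teardown */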
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}

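/* Allocate the per-adapter and per-port matchall offload state */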
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}

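/* Free matchall state, disabling any offloads still active on each port */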
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}
			kfree(adap->tc_matchall->port_matchall);
		}
		kfree(adap->tc_matchall);
	}
}