drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2019 Chelsio Communications. All rights reserved. */

#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
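
/* cxgb4_matchall_egress_validate() - sanity-check an egress matchall
 * offload request before committing hardware resources: the rule must
 * carry exactly one police action, the block must not be shared, the
 * policing rate must fit within the link speed, and no TX queue may
 * already be bound to a class other than a channel rate-limit class.
 */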
static int cxgb4_matchall_egress_validate(struct net_device *dev,
					  struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct flow_action *actions = &cls->rule->action;
	struct port_info *pi = netdev2pinfo(dev);
	struct flow_action_entry *entry;
	struct ch_sched_queue qe;
	struct sched_class *e;
	u64 max_link_rate;
	u32 i, speed;
	int ret;

	if (!flow_action_has_entries(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload needs at least 1 policing action");
		return -EINVAL;
	} else if (!flow_offload_has_one_action(actions)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload only supports 1 policing action");
		return -EINVAL;
	} else if (pi->tc_block_shared) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Egress MATCHALL offload not supported with shared blocks");
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, NULL, &speed, NULL);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to get max speed supported by the link");
		return -EINVAL;
	}

	/* Convert from Mbps to bps */
	max_link_rate = (u64)speed * 1000 * 1000;

	flow_action_for_each(i, entry, actions) {
		switch (entry->id) {
		case FLOW_ACTION_POLICE:
			/* Convert bytes per second to bits per second */
			if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Specified policing max rate is larger than underlying link speed");
				return -ERANGE;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only policing action supported with Egress MATCHALL offload");
			return -EOPNOTSUPP;
		}
	}

	for (i = 0; i < pi->nqsets; i++) {
		memset(&qe, 0, sizeof(qe));
		qe.queue = i;

		e = cxgb4_sched_queue_lookup(dev, &qe);
		if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Some queues are already bound to different class");
			return -EBUSY;
		}
	}

	return 0;
}
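
/* cxgb4_matchall_tc_bind_queues() - bind every TX queue set on the
 * port to traffic class @tc; on failure, unwind by unbinding the
 * queues bound so far.
 */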
static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	int ret;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = tc;
		ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	while (i--) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}

	return ret;
}
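
/* cxgb4_matchall_tc_unbind_queues() - release all TX queue sets on the
 * port from their traffic class.
 */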
static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct ch_sched_queue qe;
	u32 i;

	for (i = 0; i < pi->nqsets; i++) {
		qe.queue = i;
		qe.class = SCHED_CLS_NONE;
		cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
	}
}
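
/* cxgb4_matchall_alloc_tc() - allocate a channel rate-limit traffic
 * class for the police action carried by the matchall rule, bind the
 * port's TX queues to it, and mark the egress entry enabled. The
 * police rate is converted from bytes/sec to Kbps for the scheduler.
 */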
static int cxgb4_matchall_alloc_tc(struct net_device *dev,
				   struct tc_cls_matchall_offload *cls)
{
	struct ch_sched_params p = {
		.type = SCHED_CLASS_TYPE_PACKET,
		.u.params.level = SCHED_CLASS_LEVEL_CH_RL,
		.u.params.mode = SCHED_CLASS_MODE_CLASS,
		.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
		.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
		.u.params.class = SCHED_CLS_NONE,
		.u.params.minrate = 0,
		.u.params.weight = 0,
		.u.params.pktsize = dev->mtu,
	};
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *entry;
	struct sched_class *e;
	int ret;
	u32 i;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	flow_action_for_each(i, entry, &cls->rule->action)
		if (entry->id == FLOW_ACTION_POLICE)
			break;

	/* Convert from bytes per second to Kbps */
	p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
	p.u.params.channel = pi->tx_chan;
	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free traffic class available for policing action");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
	if (ret) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Could not bind queues to traffic class");
		goto out_free;
	}

	tc_port_matchall->egress.hwtc = e->idx;
	tc_port_matchall->egress.cookie = cls->cookie;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;

out_free:
	cxgb4_sched_class_free(dev, e->idx);
	return ret;
}
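
/* cxgb4_matchall_free_tc() - tear down the egress offload: unbind the
 * queues, free the hardware traffic class, and reset the egress entry.
 */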
static void cxgb4_matchall_free_tc(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	cxgb4_matchall_tc_unbind_queues(dev);
	cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);

	tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
	tc_port_matchall->egress.cookie = 0;
	tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
}
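
/* cxgb4_matchall_alloc_filter() - install the ingress matchall rule as
 * a wildcard LETCAM filter matching only the PF/VF (VIID) of this
 * port. The TC priority is mapped to a TCAM index so that rule
 * precedence in hardware follows rule priority in the stack.
 */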
static int cxgb4_matchall_alloc_filter(struct net_device *dev,
				       struct tc_cls_matchall_offload *cls)
{
	struct netlink_ext_ack *extack = cls->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	struct ch_filter_specification *fs;
	int ret, fidx;

	/* Note that TC uses prio 0 to indicate stack to generate
	 * automatic prio and hence doesn't pass prio 0 to driver.
	 * However, the hardware TCAM index starts from 0. Hence, the
	 * -1 here. 1 slot is enough to create a wildcard matchall
	 * VIID rule.
	 */
	if (cls->common.prio <= (adap->tids.nftids + adap->tids.nhpftids))
		fidx = cls->common.prio - 1;
	else
		fidx = cxgb4_get_free_ftid(dev, PF_INET);

	/* Only insert MATCHALL rule if its priority doesn't conflict
	 * with existing rules in the LETCAM.
	 */
	if (fidx < 0 ||
	    !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	fs = &tc_port_matchall->ingress.fs;
	memset(fs, 0, sizeof(*fs));

	if (fidx < adap->tids.nhpftids)
		fs->prio = 1;
	fs->tc_prio = cls->common.prio;
	fs->tc_cookie = cls->cookie;
	fs->hitcnts = 1;

	fs->val.pfvf_vld = 1;
	fs->val.pf = adap->pf;
	fs->val.vf = pi->vin;

	cxgb4_process_flow_actions(dev, &cls->rule->action, fs);

	ret = cxgb4_set_filter(dev, fidx, fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.tid = fidx;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
	return 0;
}
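
/* cxgb4_matchall_free_filter() - remove the ingress matchall filter
 * from the LETCAM and clear the cached stats and state.
 */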
static int cxgb4_matchall_free_filter(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];

	ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
			       &tc_port_matchall->ingress.fs);
	if (ret)
		return ret;

	tc_port_matchall->ingress.packets = 0;
	tc_port_matchall->ingress.bytes = 0;
	tc_port_matchall->ingress.last_used = 0;
	tc_port_matchall->ingress.tid = 0;
	tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
	return 0;
}
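
/* cxgb4_tc_matchall_replace() - offload a TC matchall rule. Ingress
 * rules become wildcard LETCAM filters; egress rules become channel
 * rate-limit traffic classes. Only one rule per direction can be
 * offloaded on a port at a time.
 *
 * An illustrative iproute2 invocation (not part of this file) that
 * would reach the egress path:
 *   tc qdisc add dev ethX clsact
 *   tc filter add dev ethX egress matchall \
 *           action police rate 1Gbit burst 64k
 */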
int cxgb4_tc_matchall_replace(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct netlink_ext_ack *extack = cls_matchall->common.extack;
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (tc_port_matchall->ingress.state ==
		    CXGB4_MATCHALL_STATE_ENABLED) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Only 1 Ingress MATCHALL can be offloaded");
			return -ENOMEM;
		}

		ret = cxgb4_validate_flow_actions(dev,
						  &cls_matchall->rule->action);
		if (ret)
			return ret;

		return cxgb4_matchall_alloc_filter(dev, cls_matchall);
	}

	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 Egress MATCHALL can be offloaded");
		return -ENOMEM;
	}

	ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
	if (ret)
		return ret;

	return cxgb4_matchall_alloc_tc(dev, cls_matchall);
}
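
/* cxgb4_tc_matchall_destroy() - remove an offloaded matchall rule,
 * matching it against the stored cookie for the requested direction.
 */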
int cxgb4_tc_matchall_destroy(struct net_device *dev,
			      struct tc_cls_matchall_offload *cls_matchall,
			      bool ingress)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (ingress) {
		if (cls_matchall->cookie !=
		    tc_port_matchall->ingress.fs.tc_cookie)
			return -ENOENT;

		return cxgb4_matchall_free_filter(dev);
	}

	if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
		return -ENOENT;

	cxgb4_matchall_free_tc(dev);
	return 0;
}
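
/* cxgb4_tc_matchall_stats() - report hit counters for the ingress
 * matchall filter to TC, pushing only the delta since the last read.
 */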
int cxgb4_tc_matchall_stats(struct net_device *dev,
			    struct tc_cls_matchall_offload *cls_matchall)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u64 packets, bytes;
	int ret;

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
		return -ENOENT;

	ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
					&packets, &bytes,
					tc_port_matchall->ingress.fs.hash);
	if (ret)
		return ret;

	if (tc_port_matchall->ingress.packets != packets) {
		flow_stats_update(&cls_matchall->stats,
				  bytes - tc_port_matchall->ingress.bytes,
				  packets - tc_port_matchall->ingress.packets,
				  tc_port_matchall->ingress.last_used);

		tc_port_matchall->ingress.packets = packets;
		tc_port_matchall->ingress.bytes = bytes;
		tc_port_matchall->ingress.last_used = jiffies;
	}

	return 0;
}
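
/* cxgb4_matchall_disable_offload() - drop any active matchall offload
 * on the port, in both directions; used at teardown.
 */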
static void cxgb4_matchall_disable_offload(struct net_device *dev)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
	if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_tc(dev);

	if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
		cxgb4_matchall_free_filter(dev);
}
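
/* cxgb4_init_tc_matchall() - allocate the adapter-wide matchall state,
 * one cxgb4_tc_port_matchall entry per port.
 */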
int cxgb4_init_tc_matchall(struct adapter *adap)
{
	struct cxgb4_tc_port_matchall *tc_port_matchall;
	struct cxgb4_tc_matchall *tc_matchall;
	int ret;

	tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
	if (!tc_matchall)
		return -ENOMEM;

	tc_port_matchall = kcalloc(adap->params.nports,
				   sizeof(*tc_port_matchall),
				   GFP_KERNEL);
	if (!tc_port_matchall) {
		ret = -ENOMEM;
		goto out_free_matchall;
	}

	tc_matchall->port_matchall = tc_port_matchall;
	adap->tc_matchall = tc_matchall;
	return 0;

out_free_matchall:
	kfree(tc_matchall);
	return ret;
}
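
/* cxgb4_cleanup_tc_matchall() - disable any remaining offloads on each
 * port, then free the per-port and adapter-wide matchall state.
 */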
void cxgb4_cleanup_tc_matchall(struct adapter *adap)
{
	u8 i;

	if (adap->tc_matchall) {
		if (adap->tc_matchall->port_matchall) {
			for (i = 0; i < adap->params.nports; i++) {
				struct net_device *dev = adap->port[i];

				if (dev)
					cxgb4_matchall_disable_offload(dev);
			}

			kfree(adap->tc_matchall->port_matchall);
		}

		kfree(adap->tc_matchall);
	}
}