1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
4 #include <linux/math64.h>
5 #include <net/pkt_cls.h>
6 #include <net/pkt_sched.h>
10 #include "../nfp_port.h"
12 #define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
/* Common head shared by all police control messages. Its 'port' member is
 * written big-endian (cpu_to_be32()) by the senders below.
 * NOTE(review): the member declarations are elided in this chunk — confirm
 * against the full file.
 */
14 struct nfp_police_cfg_head
{
/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                           Reserved                            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
/* Payload of NFP_FLOWER_CMSG_TYPE_QOS_MOD/QOS_DEL control messages.
 * Senders below fill head.port, bkt_tkn_p/bkt_tkn_c, pbs, cbs, pir and
 * cir, all via cpu_to_be32().
 * NOTE(review): member declarations beyond 'head' are elided in this view.
 */
42 struct nfp_police_config
{
43 struct nfp_police_cfg_head head
;
/* Payload of the firmware's QOS_STATS reply. The reader below accesses
 * head.port (be32) and pass_pkts/pass_bytes/drop_pkts/drop_bytes (be64).
 * NOTE(review): member declarations beyond 'head' are elided in this view.
 */
52 struct nfp_police_stats_reply
{
53 struct nfp_police_cfg_head head
;
/* nfp_flower_install_rate_limiter() - offload a matchall police action as a
 * per-port rate limiter.
 * @app:    NFP app handle
 * @netdev: netdev the rule is attached to (must be a VF representor)
 * @flow:   matchall offload descriptor carrying the police action
 * @extack: netlink extended ack used for offload error reporting
 *
 * Validates the request, then sends a QOS_MOD control message programming
 * the firmware's traffic conditioner and starts the periodic stats poll
 * when the first limiter is installed.
 * NOTE(review): error-return and brace lines appear elided in this chunk —
 * confirm against the full file.
 */
61 nfp_flower_install_rate_limiter(struct nfp_app
*app
, struct net_device
*netdev
,
62 struct tc_cls_matchall_offload
*flow
,
63 struct netlink_ext_ack
*extack
)
65 struct flow_action_entry
*action
= &flow
->rule
->action
.entries
[0];
66 struct nfp_flower_priv
*fl_priv
= app
->priv
;
67 struct nfp_flower_repr_priv
*repr_priv
;
68 struct nfp_police_config
*config
;
69 struct nfp_repr
*repr
;
/* Rate limiting is only offloaded on NFP representor netdevs. */
75 if (!nfp_netdev_is_nfp_repr(netdev
)) {
76 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload not supported on higher level port");
79 repr
= netdev_priv(netdev
);
80 repr_priv
= repr
->app_priv
;
/* A shared block would bind one rule to several ports — reject. */
82 if (repr_priv
->block_shared
) {
83 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload not supported on shared blocks");
/* Only VF ports support this offload. */
87 if (repr
->port
->type
!= NFP_PORT_VF_PORT
) {
88 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload not supported on non-VF ports");
/* Exactly one action must be present ... */
92 if (!flow_offload_has_one_action(&flow
->rule
->action
)) {
93 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload requires a single action");
/* ... the filter must be at the highest priority ... */
97 if (flow
->common
.prio
!= 1) {
98 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload requires highest priority");
/* ... and that action must be a police action. */
102 if (action
->id
!= FLOW_ACTION_POLICE
) {
103 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload requires police action");
/* Pull rate (bytes/s) and burst out of the police action. */
107 rate
= action
->police
.rate_bytes_ps
;
108 burst
= action
->police
.burst
;
109 netdev_port_id
= nfp_repr_get_port_id(netdev
);
/* Build the QOS_MOD control message for the firmware. */
111 skb
= nfp_flower_cmsg_alloc(repr
->app
, sizeof(struct nfp_police_config
),
112 NFP_FLOWER_CMSG_TYPE_QOS_MOD
, GFP_KERNEL
);
116 config
= nfp_flower_cmsg_get_data(skb
);
117 memset(config
, 0, sizeof(struct nfp_police_config
));
118 config
->head
.port
= cpu_to_be32(netdev_port_id
);
/* Peak and committed buckets are both programmed with the same
 * burst/rate, i.e. single-rate policing expressed via this message.
 */
119 config
->bkt_tkn_p
= cpu_to_be32(burst
);
120 config
->bkt_tkn_c
= cpu_to_be32(burst
);
121 config
->pbs
= cpu_to_be32(burst
);
122 config
->cbs
= cpu_to_be32(burst
);
123 config
->pir
= cpu_to_be32(rate
);
124 config
->cir
= cpu_to_be32(rate
);
125 nfp_ctrl_tx(repr
->app
->ctrl
, skb
);
/* Remember which port owns this limiter and kick off the periodic
 * stats poll when the first limiter appears.
 */
127 repr_priv
->qos_table
.netdev_port_id
= netdev_port_id
;
128 fl_priv
->qos_rate_limiters
++;
129 if (fl_priv
->qos_rate_limiters
== 1)
130 schedule_delayed_work(&fl_priv
->qos_stats_work
,
/* nfp_flower_remove_rate_limiter() - tear down an offloaded rate limiter.
 * @app:    NFP app handle
 * @netdev: netdev the rule was attached to
 * @flow:   matchall offload descriptor (unused fields elided in this view)
 * @extack: netlink extended ack used for offload error reporting
 *
 * Clears the cached qos state, stops the stats poll when the last limiter
 * goes away, and sends a QOS_DEL control message identifying the port.
 * NOTE(review): error-return and brace lines appear elided in this chunk.
 */
137 nfp_flower_remove_rate_limiter(struct nfp_app
*app
, struct net_device
*netdev
,
138 struct tc_cls_matchall_offload
*flow
,
139 struct netlink_ext_ack
*extack
)
141 struct nfp_flower_priv
*fl_priv
= app
->priv
;
142 struct nfp_flower_repr_priv
*repr_priv
;
143 struct nfp_police_config
*config
;
144 struct nfp_repr
*repr
;
/* Only representor netdevs can carry an offloaded limiter. */
148 if (!nfp_netdev_is_nfp_repr(netdev
)) {
149 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload not supported on higher level port");
152 repr
= netdev_priv(netdev
);
154 netdev_port_id
= nfp_repr_get_port_id(netdev
);
155 repr_priv
= repr
->app_priv
;
/* A zero netdev_port_id in the qos table means nothing was installed. */
157 if (!repr_priv
->qos_table
.netdev_port_id
) {
158 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: cannot remove qos entry that does not exist");
/* Build the QOS_DEL control message before touching local state. */
162 skb
= nfp_flower_cmsg_alloc(repr
->app
, sizeof(struct nfp_police_config
),
163 NFP_FLOWER_CMSG_TYPE_QOS_DEL
, GFP_KERNEL
);
167 /* Clear all qos associated data for this interface */
168 memset(&repr_priv
->qos_table
, 0, sizeof(struct nfp_fl_qos
));
/* Stop the periodic stats poll once the last limiter is removed. */
169 fl_priv
->qos_rate_limiters
--;
170 if (!fl_priv
->qos_rate_limiters
)
171 cancel_delayed_work_sync(&fl_priv
->qos_stats_work
);
/* Delete message only needs the port identifier filled in. */
173 config
= nfp_flower_cmsg_get_data(skb
);
174 memset(config
, 0, sizeof(struct nfp_police_config
));
175 config
->head
.port
= cpu_to_be32(netdev_port_id
);
176 nfp_ctrl_tx(repr
->app
->ctrl
, skb
);
/* nfp_flower_stats_rlim_reply() - handle a QOS_STATS reply from firmware.
 * @app: NFP app handle
 * @skb: control message holding a struct nfp_police_stats_reply
 *
 * Resolves the reporting netdev from the port id in the reply and updates
 * the cached current counters under qos_stats_lock. Pass and drop counts
 * are summed, i.e. the cache holds totals seen by the limiter.
 * NOTE(review): the 'goto exit_unlock_rcu' suggests an RCU read lock is
 * taken on a line elided from this view — confirm against the full file.
 */
181 void nfp_flower_stats_rlim_reply(struct nfp_app
*app
, struct sk_buff
*skb
)
183 struct nfp_flower_priv
*fl_priv
= app
->priv
;
184 struct nfp_flower_repr_priv
*repr_priv
;
185 struct nfp_police_stats_reply
*msg
;
186 struct nfp_stat_pair
*curr_stats
;
187 struct nfp_stat_pair
*prev_stats
;
188 struct net_device
*netdev
;
189 struct nfp_repr
*repr
;
192 msg
= nfp_flower_cmsg_get_data(skb
);
/* Port id in the reply identifies which repr the stats belong to. */
193 netdev_port_id
= be32_to_cpu(msg
->head
.port
);
195 netdev
= nfp_app_dev_get(app
, netdev_port_id
, NULL
);
197 goto exit_unlock_rcu
;
199 repr
= netdev_priv(netdev
);
200 repr_priv
= repr
->app_priv
;
201 curr_stats
= &repr_priv
->qos_table
.curr_stats
;
202 prev_stats
= &repr_priv
->qos_table
.prev_stats
;
/* Cache update is serialized against the TC stats readers. */
204 spin_lock_bh(&fl_priv
->qos_stats_lock
);
205 curr_stats
->pkts
= be64_to_cpu(msg
->pass_pkts
) +
206 be64_to_cpu(msg
->drop_pkts
);
207 curr_stats
->bytes
= be64_to_cpu(msg
->pass_bytes
) +
208 be64_to_cpu(msg
->drop_bytes
);
/* First reply for this limiter: baseline prev to curr so the first
 * delta reported to TC is zero rather than the lifetime total.
 */
210 if (!repr_priv
->qos_table
.last_update
) {
211 prev_stats
->pkts
= curr_stats
->pkts
;
212 prev_stats
->bytes
= curr_stats
->bytes
;
215 repr_priv
->qos_table
.last_update
= jiffies
;
216 spin_unlock_bh(&fl_priv
->qos_stats_lock
);
/* nfp_flower_stats_rlim_request() - ask firmware for one port's limiter
 * stats.
 * @fl_priv: flower app private data
 * (second parameter, the port id, is on a line elided from this view)
 *
 * Builds a QOS_STATS control message containing only the cfg head with
 * the port id and sends it; the answer arrives asynchronously in
 * nfp_flower_stats_rlim_reply().
 */
223 nfp_flower_stats_rlim_request(struct nfp_flower_priv
*fl_priv
,
226 struct nfp_police_cfg_head
*head
;
229 skb
= nfp_flower_cmsg_alloc(fl_priv
->app
,
230 sizeof(struct nfp_police_cfg_head
),
231 NFP_FLOWER_CMSG_TYPE_QOS_STATS
,
/* Request carries just the port identifier. */
236 head
= nfp_flower_cmsg_get_data(skb
);
237 memset(head
, 0, sizeof(struct nfp_police_cfg_head
));
238 head
->port
= cpu_to_be32(netdev_port_id
);
240 nfp_ctrl_tx(fl_priv
->app
->ctrl
, skb
);
/* nfp_flower_stats_rlim_request_all() - poll stats for every VF repr that
 * has a limiter installed.
 * @fl_priv: flower app private data
 *
 * Walks the VF representor set under RCU and issues one stats request per
 * port with a configured limiter.
 * NOTE(review): the rcu_read_lock/unlock and the null/port-id checks
 * implied by 'goto exit_unlock_rcu' are on lines elided from this view.
 */
244 nfp_flower_stats_rlim_request_all(struct nfp_flower_priv
*fl_priv
)
246 struct nfp_reprs
*repr_set
;
250 repr_set
= rcu_dereference(fl_priv
->app
->reprs
[NFP_REPR_TYPE_VF
]);
252 goto exit_unlock_rcu
;
254 for (i
= 0; i
< repr_set
->num_reprs
; i
++) {
255 struct net_device
*netdev
;
257 netdev
= rcu_dereference(repr_set
->reprs
[i
]);
259 struct nfp_repr
*priv
= netdev_priv(netdev
);
260 struct nfp_flower_repr_priv
*repr_priv
;
263 repr_priv
= priv
->app_priv
;
/* Port id is only non-zero when a limiter is installed. */
264 netdev_port_id
= repr_priv
->qos_table
.netdev_port_id
;
268 nfp_flower_stats_rlim_request(fl_priv
, netdev_port_id
);
/* update_stats_cache() - delayed-work handler driving the periodic poll.
 * @work: embedded work_struct of fl_priv->qos_stats_work
 *
 * Recovers the owning nfp_flower_priv via container_of(), requests stats
 * for all limited ports, then re-arms itself every NFP_FL_QOS_UPDATE
 * (one second).
 */
276 static void update_stats_cache(struct work_struct
*work
)
278 struct delayed_work
*delayed_work
;
279 struct nfp_flower_priv
*fl_priv
;
281 delayed_work
= to_delayed_work(work
);
282 fl_priv
= container_of(delayed_work
, struct nfp_flower_priv
,
285 nfp_flower_stats_rlim_request_all(fl_priv
);
/* Self re-arming: the work keeps running until explicitly cancelled. */
286 schedule_delayed_work(&fl_priv
->qos_stats_work
, NFP_FL_QOS_UPDATE
);
/* nfp_flower_stats_rate_limiter() - report limiter stats back to TC.
 * @app:    NFP app handle
 * @netdev: netdev the rule is attached to
 * @flow:   matchall offload descriptor whose stats are filled in
 * @extack: netlink extended ack used for offload error reporting
 *
 * Computes the delta between the cached current counters (filled by the
 * firmware replies) and the previously reported ones, advances the
 * baseline, and hands the delta to TC via flow_stats_update() with
 * DELAYED hardware-stats semantics.
 * NOTE(review): error-return and brace lines appear elided in this chunk.
 */
290 nfp_flower_stats_rate_limiter(struct nfp_app
*app
, struct net_device
*netdev
,
291 struct tc_cls_matchall_offload
*flow
,
292 struct netlink_ext_ack
*extack
)
294 struct nfp_flower_priv
*fl_priv
= app
->priv
;
295 struct nfp_flower_repr_priv
*repr_priv
;
296 struct nfp_stat_pair
*curr_stats
;
297 struct nfp_stat_pair
*prev_stats
;
298 u64 diff_bytes
, diff_pkts
;
299 struct nfp_repr
*repr
;
/* Only representor netdevs can have an offloaded limiter. */
301 if (!nfp_netdev_is_nfp_repr(netdev
)) {
302 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: qos rate limit offload not supported on higher level port");
305 repr
= netdev_priv(netdev
);
307 repr_priv
= repr
->app_priv
;
/* No limiter installed on this port — nothing to report. */
308 if (!repr_priv
->qos_table
.netdev_port_id
) {
309 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: cannot find qos entry for stats update");
/* Delta computation and baseline advance are atomic w.r.t. the
 * firmware-reply path updating curr_stats.
 */
313 spin_lock_bh(&fl_priv
->qos_stats_lock
);
314 curr_stats
= &repr_priv
->qos_table
.curr_stats
;
315 prev_stats
= &repr_priv
->qos_table
.prev_stats
;
316 diff_pkts
= curr_stats
->pkts
- prev_stats
->pkts
;
317 diff_bytes
= curr_stats
->bytes
- prev_stats
->bytes
;
318 prev_stats
->pkts
= curr_stats
->pkts
;
319 prev_stats
->bytes
= curr_stats
->bytes
;
320 spin_unlock_bh(&fl_priv
->qos_stats_lock
);
/* DELAYED: counters come from periodic polling, not per-packet sync. */
322 flow_stats_update(&flow
->stats
, diff_bytes
, diff_pkts
, 0,
323 repr_priv
->qos_table
.last_update
,
324 FLOW_ACTION_HW_STATS_DELAYED
);
/* nfp_flower_qos_init() - set up qos offload state at app init time.
 * @app: NFP app handle
 *
 * Initializes the stats spinlock and the delayed work that polls the
 * firmware; the work is only scheduled once a limiter is installed.
 */
328 void nfp_flower_qos_init(struct nfp_app
*app
)
330 struct nfp_flower_priv
*fl_priv
= app
->priv
;
332 spin_lock_init(&fl_priv
->qos_stats_lock
);
333 INIT_DELAYED_WORK(&fl_priv
->qos_stats_work
, &update_stats_cache
);
/* nfp_flower_qos_cleanup() - tear down qos offload state at app exit.
 * @app: NFP app handle
 *
 * Cancels the self re-arming stats work and waits for it to finish.
 */
336 void nfp_flower_qos_cleanup(struct nfp_app
*app
)
338 struct nfp_flower_priv
*fl_priv
= app
->priv
;
340 cancel_delayed_work_sync(&fl_priv
->qos_stats_work
);
/* nfp_flower_setup_qos_offload() - TC matchall qos offload entry point.
 * @app:    NFP app handle
 * @netdev: netdev the callback fired for
 * @flow:   matchall offload request (REPLACE / DESTROY / STATS)
 *
 * Rejects the request if the loaded firmware lacks VF rate limiting,
 * otherwise dispatches to the install/remove/stats handlers above.
 * NOTE(review): the trailing extack arguments, default case and closing
 * brace are elided from this view — confirm against the full file.
 */
343 int nfp_flower_setup_qos_offload(struct nfp_app
*app
, struct net_device
*netdev
,
344 struct tc_cls_matchall_offload
*flow
)
346 struct netlink_ext_ack
*extack
= flow
->common
.extack
;
347 struct nfp_flower_priv
*fl_priv
= app
->priv
;
/* Feature gate: firmware must advertise VF rate limiting support. */
349 if (!(fl_priv
->flower_ext_feats
& NFP_FL_FEATS_VF_RLIM
)) {
350 NL_SET_ERR_MSG_MOD(extack
, "unsupported offload: loaded firmware does not support qos rate limit offload");
354 switch (flow
->command
) {
355 case TC_CLSMATCHALL_REPLACE
:
356 return nfp_flower_install_rate_limiter(app
, netdev
, flow
,
358 case TC_CLSMATCHALL_DESTROY
:
359 return nfp_flower_remove_rate_limiter(app
, netdev
, flow
,
361 case TC_CLSMATCHALL_STATS
:
362 return nfp_flower_stats_rate_limiter(app
, netdev
, flow
,