// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac TC Handling (HW only)
 */

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include "common.h"
#include "dwmac4.h"
#include "dwmac5.h"
#include "stmmac.h"

static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
{
	/* Catch-all entry: match nothing (all-zero mask) and accept the
	 * frame (af) on DMA channel 0.
	 */
	memset(entry, 0, sizeof(*entry));
	entry->in_use = true;
	entry->is_last = true;
	entry->is_frag = false;
	entry->prio = ~0x0;
	entry->handle = 0;
	entry->val.match_data = 0x0;
	entry->val.match_en = 0x0;
	entry->val.af = 1;
	entry->val.dma_ch_no = 0x0;
}

static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
					     struct tc_cls_u32_offload *cls,
					     bool free)
{
	struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
	u32 loc = cls->knode.handle;
	int i;

	for (i = 0; i < priv->tc_entries_max; i++) {
		entry = &priv->tc_entries[i];
		if (!entry->in_use && !first && free)
			first = entry;
		if ((entry->handle == loc) && !free && !entry->is_frag)
			dup = entry;
	}

	if (dup)
		return dup;
	if (first) {
		first->handle = loc;
		first->in_use = true;

		/* Reset HW values */
		memset(&first->val, 0, sizeof(first->val));
	}

	return first;
}

static int tc_fill_actions(struct stmmac_tc_entry *entry,
			   struct stmmac_tc_entry *frag,
			   struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *action_entry = entry;
	const struct tc_action *act;
	struct tcf_exts *exts;
	int i;

	exts = cls->knode.exts;
	if (!tcf_exts_has_actions(exts))
		return -EINVAL;
	if (frag)
		action_entry = frag;

	tcf_exts_for_each_action(i, act, exts) {
		/* Accept frame */
		if (is_tcf_gact_ok(act)) {
			action_entry->val.af = 1;
			break;
		}
		/* Drop frame */
		if (is_tcf_gact_shot(act)) {
			action_entry->val.rf = 1;
			break;
		}
	}

	return 0;
}

static int tc_fill_entry(struct stmmac_priv *priv,
			 struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry, *frag = NULL;
	struct tc_u32_sel *sel = cls->knode.sel;
	u32 off, data, mask, real_off, rem;
	u32 prio = cls->common.prio << 16;
	int ret;

	/* Only 1 match per entry */
	if (sel->nkeys <= 0 || sel->nkeys > 1)
		return -EINVAL;

	off = sel->keys[0].off << sel->offshift;
	data = sel->keys[0].val;
	mask = sel->keys[0].mask;

	switch (ntohs(cls->common.protocol)) {
	case ETH_P_ALL:
		break;
	case ETH_P_IP:
		off += ETH_HLEN;
		break;
	default:
		return -EINVAL;
	}

	if (off > priv->tc_off_max)
		return -EINVAL;

	real_off = off / 4;
	rem = off % 4;

	entry = tc_find_entry(priv, cls, true);
	if (!entry)
		return -EINVAL;

	if (rem) {
		/* Unaligned key: split the match across this entry and a
		 * fragment entry covering the next frame word.
		 */
		frag = tc_find_entry(priv, cls, true);
		if (!frag) {
			ret = -EINVAL;
			goto err_unuse;
		}

		entry->frag_ptr = frag;
		entry->val.match_en = (mask << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.match_data = (data << (rem * 8)) &
			GENMASK(31, rem * 8);
		entry->val.frame_offset = real_off;
		entry->prio = prio;

		frag->val.match_en = (mask >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.match_data = (data >> (rem * 8)) &
			GENMASK(rem * 8 - 1, 0);
		frag->val.frame_offset = real_off + 1;
		frag->prio = prio;
		frag->is_frag = true;
	} else {
		entry->frag_ptr = NULL;
		entry->val.match_en = mask;
		entry->val.match_data = data;
		entry->val.frame_offset = real_off;
		entry->prio = prio;
	}

	ret = tc_fill_actions(entry, frag, cls);
	if (ret)
		goto err_unuse;

	return 0;

err_unuse:
	if (frag)
		frag->in_use = false;
	entry->in_use = false;
	return ret;
}

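/*
 * Worked example (illustrative, not from the original source): a 32-bit
 * u32 key at byte offset 6 gives real_off = 1 and rem = 2. The low two
 * key bytes are matched in frame word 1 (key shifted left by
 * rem * 8 = 16 bits) and the high two bytes spill into a fragment entry
 * matching frame word 2 (key shifted right by 16 bits).
 */
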
static void tc_unfill_entry(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	struct stmmac_tc_entry *entry;

	entry = tc_find_entry(priv, cls, false);
	if (!entry)
		return;

	entry->in_use = false;
	if (entry->frag_ptr) {
		entry = entry->frag_ptr;
		entry->is_frag = false;
		entry->in_use = false;
	}
}

static int tc_config_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	int ret;

	ret = tc_fill_entry(priv, cls);
	if (ret)
		return ret;

	ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
			priv->tc_entries_max);
	if (ret)
		goto err_unfill;

	return 0;

err_unfill:
	tc_unfill_entry(priv, cls);
	return ret;
}

static int tc_delete_knode(struct stmmac_priv *priv,
			   struct tc_cls_u32_offload *cls)
{
	/* Set entry and fragments as not used */
	tc_unfill_entry(priv, cls);

	return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
			priv->tc_entries_max);
}

static int tc_setup_cls_u32(struct stmmac_priv *priv,
			    struct tc_cls_u32_offload *cls)
{
	switch (cls->command) {
	case TC_CLSU32_REPLACE_KNODE:
		tc_unfill_entry(priv, cls);
		fallthrough;
	case TC_CLSU32_NEW_KNODE:
		return tc_config_knode(priv, cls);
	case TC_CLSU32_DELETE_KNODE:
		return tc_delete_knode(priv, cls);
	default:
		return -EOPNOTSUPP;
	}
}

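/*
 * Illustrative usage (assumed, not part of this file): a u32 filter that
 * exercises the path above, assuming eth0 is bound to this driver:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip u32 \
 *           match u32 0x00110000 0x00ff0000 at 8 action drop
 *
 * Exactly one 32-bit key is supported per filter (see tc_fill_entry()).
 */
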
static int tc_init(struct stmmac_priv *priv)
{
	struct dma_features *dma_cap = &priv->dma_cap;
	unsigned int count;
	int i;

	if (dma_cap->l3l4fnum) {
		priv->flow_entries_max = dma_cap->l3l4fnum;
		priv->flow_entries = devm_kcalloc(priv->device,
						  dma_cap->l3l4fnum,
						  sizeof(*priv->flow_entries),
						  GFP_KERNEL);
		if (!priv->flow_entries)
			return -ENOMEM;

		for (i = 0; i < priv->flow_entries_max; i++)
			priv->flow_entries[i].idx = i;

		dev_info(priv->device, "Enabled Flow TC (entries=%d)\n",
			 priv->flow_entries_max);
	}

	/* Fail silently as we can still use remaining features, e.g. CBS */
	if (!dma_cap->frpsel)
		return 0;

	switch (dma_cap->frpbs) {
	case 0x0:
		priv->tc_off_max = 64;
		break;
	case 0x1:
		priv->tc_off_max = 128;
		break;
	case 0x2:
		priv->tc_off_max = 256;
		break;
	default:
		return -EINVAL;
	}

	switch (dma_cap->frpes) {
	case 0x0:
		count = 64;
		break;
	case 0x1:
		count = 128;
		break;
	case 0x2:
		count = 256;
		break;
	default:
		return -EINVAL;
	}

	/* Reserve one last filter which lets all pass */
	priv->tc_entries_max = count;
	priv->tc_entries = devm_kcalloc(priv->device,
			count, sizeof(*priv->tc_entries), GFP_KERNEL);
	if (!priv->tc_entries)
		return -ENOMEM;

	tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);

	dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
		 priv->tc_entries_max, priv->tc_off_max);

	return 0;
}

static int tc_setup_cbs(struct stmmac_priv *priv,
			struct tc_cbs_qopt_offload *qopt)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue = qopt->queue;
	u32 ptr, speed_div;
	u32 mode_to_use;
	u64 value;
	int ret;

	/* Queue 0 is not AVB capable */
	if (queue <= 0 || queue >= tx_queues_count)
		return -EINVAL;
	if (!priv->dma_cap.av)
		return -EOPNOTSUPP;

	mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
	if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
		ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
		if (ret)
			return ret;

		priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
	} else if (!qopt->enable) {
		return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
	}

	/* Port Transmit Rate and Speed Divider */
	ptr = (priv->speed == SPEED_100) ? 4 : 8;
	speed_div = (priv->speed == SPEED_100) ? 100000 : 1000000;

	/* Final adjustments for HW */
	value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);

	value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
	priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);

	value = qopt->hicredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);

	value = qopt->locredit * 1024ll * 8;
	priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);

	ret = stmmac_config_cbs(priv, priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	if (ret)
		return ret;

	dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
		 queue, qopt->sendslope, qopt->idleslope,
		 qopt->hicredit, qopt->locredit);

	return 0;
}

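/*
 * Illustrative usage (assumed, not part of this file) for queue 1 of
 * eth0 on a 1 Gb/s link:
 *
 *   tc qdisc replace dev eth0 parent 100:2 cbs \
 *           idleslope 20000 sendslope -980000 \
 *           hicredit 30 locredit -1470 offload 1
 *
 * With idleslope given in kbit/s, the scaling above yields
 * idle_slope = 20000 * 1024 * 8 / 1000000 = 163 (integer division)
 * in HW units.
 */
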
static int tc_parse_flow_actions(struct stmmac_priv *priv,
				 struct flow_action *action,
				 struct stmmac_flow_entry *entry,
				 struct netlink_ext_ack *extack)
{
	struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;
	if (!flow_action_basic_hw_stats_check(action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			entry->action |= STMMAC_FLOW_ACTION_DROP;
			return 0;
		default:
			break;
		}
	}

	/* Nothing to do, maybe inverse filter ? */
	return 0;
}

static int tc_add_basic_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic match;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
		return -EINVAL;

	flow_rule_match_basic(rule, &match);
	entry->ip_proto = match.key->ip_proto;
	return 0;
}

static int tc_add_ip4_flow(struct stmmac_priv *priv,
			   struct flow_cls_offload *cls,
			   struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ipv4_addrs match;
	u32 hw_match;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
		return -EINVAL;

	flow_rule_match_ipv4_addrs(rule, &match);

	hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
					      false, false, inv, hw_match);
		if (ret)
			return ret;
	}

	return 0;
}

static int tc_add_ports_flow(struct stmmac_priv *priv,
			     struct flow_cls_offload *cls,
			     struct stmmac_flow_entry *entry)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
	struct flow_match_ports match;
	u32 hw_match;
	bool is_udp;
	int ret;

	/* Nothing to do here */
	if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
		return -EINVAL;

	switch (entry->ip_proto) {
	case IPPROTO_TCP:
		is_udp = false;
		break;
	case IPPROTO_UDP:
		is_udp = true;
		break;
	default:
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);

	hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, true, inv, hw_match);
		if (ret)
			return ret;
	}

	hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
	if (hw_match) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
					      is_udp, false, inv, hw_match);
		if (ret)
			return ret;
	}

	entry->is_l4 = true;
	return 0;
}

static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
					      struct flow_cls_offload *cls,
					      bool get_free)
{
	int i;

	for (i = 0; i < priv->flow_entries_max; i++) {
		struct stmmac_flow_entry *entry = &priv->flow_entries[i];

		if (entry->cookie == cls->cookie)
			return entry;
		if (get_free && (entry->in_use == false))
			return entry;
	}

	return NULL;
}

static struct {
	int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
		  struct stmmac_flow_entry *entry);
} tc_flow_parsers[] = {
	{ .fn = tc_add_basic_flow },
	{ .fn = tc_add_ip4_flow },
	{ .fn = tc_add_ports_flow },
};

static int tc_add_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	int i, ret;

	if (!entry) {
		entry = tc_find_flow(priv, cls, true);
		if (!entry)
			return -ENOENT;
	}

	ret = tc_parse_flow_actions(priv, &rule->action, entry,
				    cls->common.extack);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
		ret = tc_flow_parsers[i].fn(priv, cls, entry);
		if (!ret)
			entry->in_use = true;
	}

	if (!entry->in_use)
		return -EINVAL;

	entry->cookie = cls->cookie;
	return 0;
}

static int tc_del_flow(struct stmmac_priv *priv,
		       struct flow_cls_offload *cls)
{
	struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
	int ret;

	if (!entry || !entry->in_use)
		return -ENOENT;

	if (entry->is_l4) {
		ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	} else {
		ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
					      false, false, false, 0);
	}

	entry->in_use = false;
	entry->cookie = 0;
	entry->is_l4 = false;
	return ret;
}

static int tc_setup_cls(struct stmmac_priv *priv,
			struct flow_cls_offload *cls)
{
	int ret = 0;

	/* When RSS is enabled, the filtering will be bypassed */
	if (priv->rss.enable)
		return -EBUSY;

	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		ret = tc_add_flow(priv, cls);
		break;
	case FLOW_CLS_DESTROY:
		ret = tc_del_flow(priv, cls);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

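/*
 * Illustrative flower usage (assumed, not part of this file) that maps
 * onto the L3/L4 filters above, assuming eth0 and a clsact qdisc:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip flower \
 *           ip_proto udp dst_port 5001 action drop
 */
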
static int tc_setup_taprio(struct stmmac_priv *priv,
			   struct tc_taprio_qopt_offload *qopt)
{
	u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
	struct plat_stmmacenet_data *plat = priv->plat;
	struct timespec64 time;
	bool fpe = false;
	int i, ret = 0;
	u64 ctr;

	if (!priv->dma_cap.estsel)
		return -EOPNOTSUPP;

	switch (wid) {
	case 0x1:
		wid = 16;
		break;
	case 0x2:
		wid = 20;
		break;
	case 0x3:
		wid = 24;
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (dep) {
	case 0x1:
		dep = 64;
		break;
	case 0x2:
		dep = 128;
		break;
	case 0x3:
		dep = 256;
		break;
	case 0x4:
		dep = 512;
		break;
	case 0x5:
		dep = 1024;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (!qopt->enable)
		goto disable;
	if (qopt->num_entries >= dep)
		return -EINVAL;
	if (!qopt->base_time)
		return -ERANGE;
	if (!qopt->cycle_time)
		return -ERANGE;

	if (!plat->est) {
		plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
					 GFP_KERNEL);
		if (!plat->est)
			return -ENOMEM;
	} else {
		memset(plat->est, 0, sizeof(*plat->est));
	}

	size = qopt->num_entries;

	priv->plat->est->gcl_size = size;
	priv->plat->est->enable = qopt->enable;

	for (i = 0; i < size; i++) {
		s64 delta_ns = qopt->entries[i].interval;
		u32 gates = qopt->entries[i].gate_mask;

		if (delta_ns > GENMASK(wid, 0))
			return -ERANGE;
		if (gates > GENMASK(31 - wid, 0))
			return -ERANGE;

		switch (qopt->entries[i].command) {
		case TC_TAPRIO_CMD_SET_GATES:
			if (fpe)
				return -EINVAL;
			break;
		case TC_TAPRIO_CMD_SET_AND_HOLD:
			gates |= BIT(0);
			fpe = true;
			break;
		case TC_TAPRIO_CMD_SET_AND_RELEASE:
			gates &= ~BIT(0);
			fpe = true;
			break;
		default:
			return -EOPNOTSUPP;
		}

		priv->plat->est->gcl[i] = delta_ns | (gates << wid);
	}

	/* Adjust for real system time */
	time = ktime_to_timespec64(qopt->base_time);
	priv->plat->est->btr[0] = (u32)time.tv_nsec;
	priv->plat->est->btr[1] = (u32)time.tv_sec;

	ctr = qopt->cycle_time;
	priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
	priv->plat->est->ctr[1] = (u32)ctr;

	if (fpe && !priv->dma_cap.fpesel)
		return -EOPNOTSUPP;

	ret = stmmac_fpe_configure(priv, priv->ioaddr,
				   priv->plat->tx_queues_to_use,
				   priv->plat->rx_queues_to_use, fpe);
	if (ret && fpe) {
		netdev_err(priv->dev, "failed to enable Frame Preemption\n");
		return ret;
	}

	ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
				   priv->plat->clk_ptp_rate);
	if (ret) {
		netdev_err(priv->dev, "failed to configure EST\n");
		goto disable;
	}

	netdev_info(priv->dev, "configured EST\n");
	return 0;

disable:
	priv->plat->est->enable = false;
	stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
			     priv->plat->clk_ptp_rate);
	return ret;
}

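/*
 * Illustrative taprio usage (assumed, not part of this file) for eth0
 * with three traffic classes and full offload (flags 0x2):
 *
 *   tc qdisc replace dev eth0 parent root taprio \
 *           num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
 *           queues 1@0 1@1 1@2 \
 *           base-time 1000000000 \
 *           sched-entry S 01 300000 \
 *           sched-entry S 02 300000 \
 *           sched-entry S 04 400000 \
 *           flags 0x2
 *
 * Each sched-entry interval lands in the low 'wid' bits of a GCL word
 * and the gate mask in the remaining high bits (see the loop above).
 */
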
static int tc_setup_etf(struct stmmac_priv *priv,
			struct tc_etf_qopt_offload *qopt)
{
	if (!priv->dma_cap.tbssel)
		return -EOPNOTSUPP;
	if (qopt->queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;
	if (!(priv->tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
		return -EINVAL;

	if (qopt->enable)
		priv->tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
	else
		priv->tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;

	netdev_info(priv->dev, "%s ETF for Queue %d\n",
		    qopt->enable ? "enabled" : "disabled", qopt->queue);
	return 0;
}

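/*
 * Illustrative etf usage (assumed, not part of this file), assuming
 * queue 0 of eth0 supports TBS:
 *
 *   tc qdisc replace dev eth0 parent 100:1 etf \
 *           clockid CLOCK_TAI delta 300000 offload
 */
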
const struct stmmac_tc_ops dwmac510_tc_ops = {
	.init = tc_init,
	.setup_cls_u32 = tc_setup_cls_u32,
	.setup_cbs = tc_setup_cbs,
	.setup_cls = tc_setup_cls,
	.setup_taprio = tc_setup_taprio,
	.setup_etf = tc_setup_etf,
};