// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 */
#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"	/* DPNI_LINK_OPT_* */
#include "dpaa2-eth.h"

/* To be kept in sync with DPNI statistics */
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
	"[hw] rx mcast frames",
	"[hw] rx mcast bytes",
	"[hw] rx bcast frames",
	"[hw] rx bcast bytes",
	"[hw] tx mcast frames",
	"[hw] tx mcast bytes",
	"[hw] tx bcast frames",
	"[hw] tx bcast bytes",
	"[hw] rx filtered frames",
	"[hw] rx discarded frames",
	"[hw] rx nobuffer discards",
	"[hw] tx discarded frames",
	"[hw] tx confirmed frames",
	"[hw] tx dequeued bytes",
	"[hw] tx dequeued frames",
	"[hw] tx rejected bytes",
	"[hw] tx rejected frames",
	"[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS	ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
	"[drv] tx conf frames",
	"[drv] tx conf bytes",
	"[drv] tx converted sg frames",
	"[drv] tx converted sg bytes",
	"[drv] enqueue portal busy",
	"[drv] dequeue portal busy",
	"[drv] channel pull errors",
	"[drv] xdp tx errors",
	"[qbman] rx pending frames",
	"[qbman] rx pending bytes",
	"[qbman] tx conf pending frames",
	"[qbman] tx conf pending bytes",
	"[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS	ARRAY_SIZE(dpaa2_ethtool_extras)
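
/* The string tables above only define the names reported to ethtool; the
 * matching values are written, in the same order, by
 * dpaa2_eth_get_ethtool_stats() further down. User space reads them with,
 * for example, "ethtool -S <ifname>" (the interface name is illustrative).
 */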

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	return phylink_ethtool_nway_reset(priv->mac->phylink);
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
			     struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	/* If a MAC/PHY is attached, let phylink report the settings */
	if (priv->mac)
		return phylink_ethtool_ksettings_get(priv->mac->phylink,
						     link_settings);

	link_settings->base.autoneg = AUTONEG_DISABLE;
	if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
		link_settings->base.duplex = DUPLEX_FULL;
	link_settings->base.speed = priv->link_state.rate;

	return 0;
}

static int
dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
			     const struct ethtool_link_ksettings *link_settings)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
				     struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 link_options = priv->link_state.options;

	/* If a MAC/PHY is attached, report what phylink negotiated */
	if (priv->mac) {
		phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
		return;
	}

	pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
	pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
	pause->autoneg = AUTONEG_DISABLE;
}
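
/* Note on the DPNI option encoding used below (as derived from this file):
 * DPNI_LINK_OPT_PAUSE reflects Rx pause, while DPNI_LINK_OPT_ASYM_PAUSE is
 * set whenever Rx and Tx pause differ. So PAUSE alone means pause in both
 * directions, PAUSE + ASYM_PAUSE means Rx only, and ASYM_PAUSE alone means
 * Tx only.
 */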

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
				    struct ethtool_pauseparam *pause)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg cfg = {0};
	int err;

	if (!dpaa2_eth_has_pause_support(priv)) {
		netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
			    DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
		return -EOPNOTSUPP;
	}

	if (priv->mac)
		return phylink_ethtool_set_pauseparam(priv->mac->phylink,
						      pause);

	cfg.rate = priv->link_state.rate;
	cfg.options = priv->link_state.options;
	if (pause->rx_pause)
		cfg.options |= DPNI_LINK_OPT_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_PAUSE;
	if (!!pause->rx_pause ^ !!pause->tx_pause)
		cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
	else
		cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

	if (cfg.options == priv->link_state.options)
		return 0;

	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_state failed\n");
		return err;
	}

	priv->link_state.options = cfg.options;

	return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
				  u8 *data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
			strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		if (priv->mac)
			dpaa2_mac_get_strings(p);
		break;
	}
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
	int num_ss_stats = DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	switch (sset) {
	case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
		if (priv->mac)
			num_ss_stats += dpaa2_mac_get_sset_count();
		return num_ss_stats;
	default:
		return -EOPNOTSUPP;
	}
}

/** Fill in hardware counters, as returned by MC.
 */
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
					struct ethtool_stats *stats,
					u64 *data)
{
	union dpni_statistics dpni_stats;
	u32 fcnt, bcnt, buf_cnt;
	u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
	u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_drv_stats *extras;
	struct dpaa2_eth_ch_stats *ch_stats;
	int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
		sizeof(dpni_stats.page_0),
		sizeof(dpni_stats.page_1),
		sizeof(dpni_stats.page_2),
		sizeof(dpni_stats.page_3),
		sizeof(dpni_stats.page_4),
		sizeof(dpni_stats.page_5),
		sizeof(dpni_stats.page_6),
	};
	int i = 0, j, k, err, num_cnt;

	memset(data, 0,
	       sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

	/* Print standard counters, from DPNI statistics */
	for (j = 0; j <= 6; j++) {
		/* We're not interested in pages 4 & 5 for now */
		if (j == 4 || j == 5)
			continue;
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
					  j, &dpni_stats);
		if (err == -EINVAL)
			/* Older firmware versions don't support all pages */
			memset(&dpni_stats, 0, sizeof(dpni_stats));
		else if (err)
			netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j);

		num_cnt = dpni_stats_page_size[j] / sizeof(u64);
		for (k = 0; k < num_cnt; k++)
			*(data + i++) = dpni_stats.raw.counter[k];
	}

	/* Print per-cpu extra stats */
	for_each_online_cpu(k) {
		extras = per_cpu_ptr(priv->percpu_extras, k);
		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
			*((__u64 *)data + i + j) += *((__u64 *)extras + j);
	}
	i += j;

	/* Per-channel stats */
	for (k = 0; k < priv->num_channels; k++) {
		ch_stats = &priv->channel[k]->stats;
		for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64) - 1; j++)
			*((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
	}
	i += j;

	for (j = 0; j < priv->num_fqs; j++) {
		/* Print FQ instantaneous counts */
		err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
					      &fcnt, &bcnt);
		if (err) {
			netdev_warn(net_dev, "FQ query error %d", err);
			return;
		}

		if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
			fcnt_tx_total += fcnt;
			bcnt_tx_total += bcnt;
		} else {
			fcnt_rx_total += fcnt;
			bcnt_rx_total += bcnt;
		}
	}

	*(data + i++) = fcnt_rx_total;
	*(data + i++) = bcnt_rx_total;
	*(data + i++) = fcnt_tx_total;
	*(data + i++) = bcnt_tx_total;

	err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
	if (err) {
		netdev_warn(net_dev, "Buffer count query error %d\n", err);
		return;
	}
	*(data + i++) = buf_cnt;

	if (priv->mac)
		dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
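
/* For reference, the layout written into data[] above is: the raw DPNI
 * counters from pages 0-3 and 6, then the accumulated per-cpu driver extras,
 * the per-channel stats, the four FQ totals, the buffer pool count and,
 * when a MAC is attached, the MAC counters. The string tables at the top of
 * this file are expected to describe the same layout.
 */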

static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (eth_mask->h_proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
		*(__be16 *)(key + off) = eth_value->h_proto;
		*(__be16 *)(mask + off) = eth_mask->h_proto;
		*fields |= DPAA2_ETH_DIST_ETHTYPE;
	}

	if (!is_zero_ether_addr(eth_mask->h_source)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
		ether_addr_copy(key + off, eth_value->h_source);
		ether_addr_copy(mask + off, eth_mask->h_source);
		*fields |= DPAA2_ETH_DIST_ETHSRC;
	}

	if (!is_zero_ether_addr(eth_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, eth_value->h_dest);
		ether_addr_copy(mask + off, eth_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
				   struct ethtool_usrip4_spec *uip_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;
	u32 tmp_value, tmp_mask;

	if (uip_mask->tos || uip_mask->ip_ver)
		return -EOPNOTSUPP;

	if (uip_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = uip_value->ip4src;
		*(__be32 *)(mask + off) = uip_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (uip_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = uip_value->ip4dst;
		*(__be32 *)(mask + off) = uip_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (uip_mask->proto) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
		*(u8 *)(key + off) = uip_value->proto;
		*(u8 *)(mask + off) = uip_mask->proto;
		*fields |= DPAA2_ETH_DIST_IPPROTO;
	}

	if (uip_mask->l4_4_bytes) {
		tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
		tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = htons(tmp_value >> 16);
		*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
		*fields |= DPAA2_ETH_DIST_L4SRC;

		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
		*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	return 0;
}
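
/* In the ip4 user-flow spec handled above, l4_4_bytes carries the first four
 * bytes of the L4 header: the upper 16 bits are treated as the source port
 * and the lower 16 bits as the destination port, which is why the value is
 * split with ">> 16" and "& 0xFFFF" before being written into the key.
 */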

static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
				  struct ethtool_tcpip4_spec *l4_mask,
				  void *key, void *mask, u8 l4_proto, u64 *fields)
{
	int off;

	if (l4_mask->ip4src) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
		*(__be32 *)(key + off) = l4_value->ip4src;
		*(__be32 *)(mask + off) = l4_mask->ip4src;
		*fields |= DPAA2_ETH_DIST_IPSRC;
	}

	if (l4_mask->ip4dst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
		*(__be32 *)(key + off) = l4_value->ip4dst;
		*(__be32 *)(mask + off) = l4_mask->ip4dst;
		*fields |= DPAA2_ETH_DIST_IPDST;
	}

	if (l4_mask->psrc) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
		*(__be16 *)(key + off) = l4_value->psrc;
		*(__be16 *)(mask + off) = l4_mask->psrc;
		*fields |= DPAA2_ETH_DIST_L4SRC;
	}

	if (l4_mask->pdst) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
		*(__be16 *)(key + off) = l4_value->pdst;
		*(__be16 *)(mask + off) = l4_mask->pdst;
		*fields |= DPAA2_ETH_DIST_L4DST;
	}

	/* Only apply the rule for IPv4 frames with the specified L4 proto */
	off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
	*(__be16 *)(key + off) = htons(ETH_P_IP);
	*(__be16 *)(mask + off) = htons(0xFFFF);
	*fields |= DPAA2_ETH_DIST_ETHTYPE;

	off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
	*(u8 *)(key + off) = l4_proto;
	*(u8 *)(mask + off) = 0xFF;
	*fields |= DPAA2_ETH_DIST_IPPROTO;

	return 0;
}
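
/* Rules of this kind are typically installed from user space with something
 * like "ethtool -N <ifname> flow-type tcp4 src-ip 10.0.0.1 dst-port 5000
 * action 2" (interface name, address and queue index are only illustrative);
 * ethtool hands the request to dpaa2_eth_set_rxnfc() below.
 */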

static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
				   struct ethtool_flow_ext *ext_mask,
				   void *key, void *mask, u64 *fields)
{
	int off;

	if (ext_mask->vlan_etype)
		return -EOPNOTSUPP;

	if (ext_mask->vlan_tci) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
		*(__be16 *)(key + off) = ext_value->vlan_tci;
		*(__be16 *)(mask + off) = ext_mask->vlan_tci;
		*fields |= DPAA2_ETH_DIST_VLAN;
	}

	return 0;
}

static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
				       struct ethtool_flow_ext *ext_mask,
				       void *key, void *mask, u64 *fields)
{
	int off;

	if (!is_zero_ether_addr(ext_mask->h_dest)) {
		off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
		ether_addr_copy(key + off, ext_value->h_dest);
		ether_addr_copy(mask + off, ext_mask->h_dest);
		*fields |= DPAA2_ETH_DIST_ETHDST;
	}

	return 0;
}

static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
				   void *mask, u64 *fields)
{
	int err;

	switch (fs->flow_type & 0xFF) {
	case ETHER_FLOW:
		err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
					      key, mask, fields);
		break;
	case IP_USER_FLOW:
		err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
					      &fs->m_u.usr_ip4_spec, key, mask, fields);
		break;
	case TCP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
					     key, mask, IPPROTO_TCP, fields);
		break;
	case UDP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
					     key, mask, IPPROTO_UDP, fields);
		break;
	case SCTP_V4_FLOW:
		err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
					     &fs->m_u.sctp_ip4_spec, key, mask,
					     IPPROTO_SCTP, fields);
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (err)
		return err;

	if (fs->flow_type & FLOW_EXT) {
		err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
		if (err)
			return err;
	}

	if (fs->flow_type & FLOW_MAC_EXT) {
		err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
						  mask, fields);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
				 struct ethtool_rx_flow_spec *fs,
				 bool add)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	struct dpni_rule_cfg rule_cfg = { 0 };
	struct dpni_fs_action_cfg fs_act = { 0 };
	dma_addr_t key_iova;
	u64 fields = 0;
	void *key_buf;
	int i, err;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
	    fs->ring_cookie >= dpaa2_eth_queue_count(priv))
		return -EINVAL;

	rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

	/* allocate twice the key size, for the actual key and for mask */
	key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
	if (!key_buf)
		return -ENOMEM;

	/* Fill the key and mask memory areas */
	err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
	if (err)
		goto free_mem;

	if (!dpaa2_eth_fs_mask_enabled(priv)) {
		/* Masking allows us to configure a maximal key during init and
		 * use it for all flow steering rules. Without it, we include
		 * in the key only the fields actually used, so we need to
		 * extract the others from the final key buffer.
		 *
		 * Program the FS key if needed, or return error if previously
		 * set key can't be used for the current rule. User needs to
		 * delete existing rules in this case to allow for the new one.
		 */
		if (!priv->rx_cls_fields) {
			err = dpaa2_eth_set_cls(net_dev, fields);
			if (err)
				goto free_mem;

			priv->rx_cls_fields = fields;
		} else if (priv->rx_cls_fields != fields) {
			netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
			err = -EOPNOTSUPP;
			goto free_mem;
		}

		dpaa2_eth_cls_trim_rule(key_buf, fields);
		rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
	}

	key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		err = -ENOMEM;
		goto free_mem;
	}

	rule_cfg.key_iova = key_iova;
	if (dpaa2_eth_fs_mask_enabled(priv))
		rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC)
		fs_act.options |= DPNI_FS_OPT_DISCARD;
	else
		fs_act.flow_id = fs->ring_cookie;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		if (add)
			err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
						i, fs->location, &rule_cfg,
						&fs_act);
		else
			err = dpni_remove_fs_entry(priv->mc_io, 0,
						   priv->mc_token, i,
						   &rule_cfg);
		if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
	kfree(key_buf);

	return err;
}
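
/* As laid out above, a single buffer of twice the key size is used: the rule
 * key occupies the first half and its mask the second half, and both are
 * passed to the firmware through one DMA mapping (mask_iova is only set when
 * FS masking is supported).
 */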

static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
	int i, rules = 0;

	for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
		if (priv->cls_rules[i].in_use)
			rules++;

	return rules;
}

static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
				     struct ethtool_rx_flow_spec *new_fs,
				     unsigned int location)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_cls_rule *rule;
	int err = -EINVAL;

	if (!priv->rx_cls_enabled)
		return -EOPNOTSUPP;

	if (location >= dpaa2_eth_fs_count(priv))
		return -EINVAL;

	rule = &priv->cls_rules[location];

	/* If a rule is present at the specified location, delete it. */
	if (rule->in_use) {
		err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
		if (err)
			return err;

		rule->in_use = 0;

		if (!dpaa2_eth_fs_mask_enabled(priv) &&
		    !dpaa2_eth_num_cls_rules(priv))
			priv->rx_cls_fields = 0;
	}

	/* If no new entry to add, return here */
	if (!new_fs)
		return err;

	err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
	if (err)
		return err;

	rule->in_use = 1;
	rule->fs = *new_fs;

	return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int max_rules = dpaa2_eth_fs_count(priv);
	int i, j = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_GRXFH:
		/* we purposely ignore cmd->flow_type for now, because the
		 * classifier only supports a single set of fields for all
		 * protocols
		 */
		rxnfc->data = priv->rx_hash_fields;
		break;
	case ETHTOOL_GRXRINGS:
		rxnfc->data = dpaa2_eth_queue_count(priv);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
		rxnfc->data = max_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		if (rxnfc->fs.location >= max_rules)
			return -EINVAL;
		rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
							max_rules);
		if (!priv->cls_rules[rxnfc->fs.location].in_use)
			return -EINVAL;
		rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
		break;
	case ETHTOOL_GRXCLSRLALL:
		for (i = 0; i < max_rules; i++) {
			if (!priv->cls_rules[i].in_use)
				continue;
			if (j == rxnfc->rule_cnt)
				return -EMSGSIZE;
			rule_locs[j++] = i;
		}
		rxnfc->rule_cnt = j;
		rxnfc->data = max_rules;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
			       struct ethtool_rxnfc *rxnfc)
{
	int err = 0;

	switch (rxnfc->cmd) {
	case ETHTOOL_SRXFH:
		if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
			return -EOPNOTSUPP;
		err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
		break;
	case ETHTOOL_SRXCLSRLINS:
		err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
				 struct ethtool_ts_info *info)
{
	/* Without a registered PTP clock (phc index still -1), report only
	 * the default software timestamping capabilities.
	 */
	if (dpaa2_phc_index == -1)
		return ethtool_op_get_ts_info(dev, info);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	info->phc_index = dpaa2_phc_index;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON) |
			 (1 << HWTSTAMP_TX_ONESTEP_SYNC);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}
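
/* The timestamping capabilities reported here are what generic tools such as
 * "ethtool -T <ifname>" or a PTP daemon query before enabling hardware
 * timestamping (the interface name is illustrative).
 */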

const struct ethtool_ops dpaa2_ethtool_ops = {
	.get_drvinfo = dpaa2_eth_get_drvinfo,
	.nway_reset = dpaa2_eth_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = dpaa2_eth_get_link_ksettings,
	.set_link_ksettings = dpaa2_eth_set_link_ksettings,
	.get_pauseparam = dpaa2_eth_get_pauseparam,
	.set_pauseparam = dpaa2_eth_set_pauseparam,
	.get_sset_count = dpaa2_eth_get_sset_count,
	.get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
	.get_strings = dpaa2_eth_get_strings,
	.get_rxnfc = dpaa2_eth_get_rxnfc,
	.set_rxnfc = dpaa2_eth_set_rxnfc,
	.get_ts_info = dpaa2_eth_get_ts_info,
};