// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/bpf_trace.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/rtnetlink.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
#include <net/page_pool/helpers.h>
#include <net/switchdev.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_FRAM_BASE		0x12000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS		8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	2024

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024

#define AM65_CPSW_PORTN_REG_CTL			0x004
#define AM65_CPSW_PORTN_REG_DSCP_MAP		0x120
#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_SGMII_CONTROL_REG		0x010
#define AM65_CPSW_SGMII_MR_ADV_ABILITY_REG	0x018
#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE	BIT(0)

#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)
#define AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN	BIT(16)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_REG_CTL */
#define AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN	BIT(1)
#define AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN	BIT(2)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN		BIT(0)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN	BIT(1)
#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN	BIT(2)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN		BIT(3)
#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN		BIT(9)

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS	(BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET		(0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_TS_RX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors per channel/flow */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE	16
#define AM65_CPSW_NAV_SW_DATA_SIZE	16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define AM65_CPSW_DEFAULT_TX_CHNS	8
#define AM65_CPSW_DEFAULT_RX_CHN_FLOWS	1

/* CPPI streaming packet interface */
#define AM65_CPSW_CPPI_TX_FLOW_ID	0x3FFF
#define AM65_CPSW_CPPI_TX_PKT_TYPE	0x7

#define AM65_CPSW_XDP_CONSUMED		BIT(1)
#define AM65_CPSW_XDP_REDIRECT		BIT(0)
#define AM65_CPSW_XDP_PASS		0

/* Include headroom compatible with both skb and xdpf */
#define AM65_CPSW_HEADROOM_NA (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define AM65_CPSW_HEADROOM ALIGN(AM65_CPSW_HEADROOM_NA, sizeof(long))
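
/* Each RX buffer is a single page_pool page: AM65_CPSW_HEADROOM bytes of
 * headroom shared by the skb and XDP paths (see am65_cpsw_build_skb() and
 * am65_cpsw_nuss_rx_push()), followed by up to AM65_CPSW_MAX_PACKET_SIZE
 * bytes of packet data mapped for DMA.
 */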

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

#define AM65_CPSW_DSCP_MAX	GENMASK(5, 0)
#define AM65_CPSW_PRI_MAX	GENMASK(2, 0)
#define AM65_CPSW_DSCP_PRI_PER_REG	8
#define AM65_CPSW_DSCP_PRI_SIZE		4	/* in bits */
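/* Each AM65_CPSW_PORTN_REG_DSCP_MAP register packs 8 DSCP entries of 4 bits
 * each, so e.g. DSCP 10 lives in the second 32-bit MAP register (byte offset
 * 0x4) at bit offset 8.
 */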
static int am65_cpsw_port_set_dscp_map(struct am65_cpsw_port *slave, u8 dscp, u8 pri)
{
	int reg_ofs, bit_ofs;
	u32 val;

	if (dscp > AM65_CPSW_DSCP_MAX)
		return -EINVAL;

	if (pri > AM65_CPSW_PRI_MAX)
		return -EINVAL;

	/* 32-bit register offset to this dscp */
	reg_ofs = (dscp / AM65_CPSW_DSCP_PRI_PER_REG) * 4;
	/* bit field offset to this dscp */
	bit_ofs = AM65_CPSW_DSCP_PRI_SIZE * (dscp % AM65_CPSW_DSCP_PRI_PER_REG);

	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);
	val &= ~(AM65_CPSW_PRI_MAX << bit_ofs);	/* clear */
	val |= pri << bit_ofs;			/* set */
	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_DSCP_MAP + reg_ofs);

	return 0;
}

static void am65_cpsw_port_enable_dscp_map(struct am65_cpsw_port *slave)
{
	int dscp, pri;
	u32 val;

	/* Default DSCP to User Priority mapping as per:
	 *	https://datatracker.ietf.org/doc/html/rfc8325#section-4.3
	 *	and
	 *	https://datatracker.ietf.org/doc/html/rfc8622#section-11
	 */
	for (dscp = 0; dscp <= AM65_CPSW_DSCP_MAX; dscp++) {
		/* pri is selected per the RFC 8325/8622 tables referenced
		 * above; the full per-DSCP selection table is not reproduced
		 * here.
		 */
		pri = 0;
		am65_cpsw_port_set_dscp_map(slave, dscp, pri);
	}

	/* enable port IPV4 and IPV6 DSCP for this port */
	val = readl(slave->port_base + AM65_CPSW_PORTN_REG_CTL);
	val |= AM65_CPSW_PN_REG_CTL_DSCP_IPV4_EN |
	       AM65_CPSW_PN_REG_CTL_DSCP_IPV6_EN;
	writel(val, slave->port_base + AM65_CPSW_PORTN_REG_CTL);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1,
		 common->pdata.quirks);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
				       unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (!common->is_emac_mode)
		return 0;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid,
				BIT(port->port_id) | ALE_PORT_HOST);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
					bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc && !common->is_emac_mode) {
		dev_dbg(common->dev, "promisc mode requested in switch mode");
		return;
	}

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = READ_ONCE(netif_txq->trans_start);

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   netdev_queue_dql_avail(netif_txq),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}
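
/* Map one page_pool page for RX DMA and queue it on the given flow. The page
 * pointer and the flow id are stashed in the descriptor's software data so
 * that am65_cpsw_nuss_rx_packets() and am65_cpsw_nuss_rx_cleanup() can
 * recover them from the completed descriptor alone.
 */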
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct page *page, u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev,
				 page_address(page) + AM65_CPSW_HEADROOM,
				 AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
			       buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	swdata->page = page;
	swdata->flow_id = flow_idx;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, flow_idx,
					desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val;
	u32 pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Port fifos work in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packets go in Enet fifo 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);

static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	int id, port;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];

		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			if (xdp_rxq_info_is_reg(rxq))
				xdp_rxq_info_unreg(rxq);
		}

		if (flow->page_pool) {
			page_pool_destroy(flow->page_pool);
			flow->page_pool = NULL;
		}
	}
}

static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP,
		.order = 0,
		.pool_size = AM65_CPSW_MAX_RX_DESC,
		.nid = dev_to_node(common->dev),
		.dev = common->dev,
		.dma_dir = DMA_BIDIRECTIONAL,
		/* .napi set dynamically */
	};
	struct am65_cpsw_rx_flow *flow;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int id, port, ret;

	for (id = 0; id < common->rx_ch_num_flows; id++) {
		flow = &rx_chn->flows[id];
		pp_params.napi = &flow->napi_rx;
		pool = page_pool_create(&pp_params);
		if (IS_ERR(pool)) {
			ret = PTR_ERR(pool);
			goto err;
		}

		flow->page_pool = pool;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (port = 0; port < common->port_num; port++) {
			if (!common->ports[port].ndev)
				continue;

			rxq = &common->ports[port].xdp_rxq[id];

			ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
					       id, flow->napi_rx.napi_id);
			if (ret)
				goto err;

			ret = xdp_rxq_info_reg_mem_model(rxq,
							 MEM_TYPE_PAGE_POOL,
							 pool);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	am65_cpsw_destroy_xdp_rxqs(common);
	return ret;
}

static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
				   void *desc,
				   unsigned char dsize_log2)
{
	void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);

	return (desc - pool_addr) >> dsize_log2;
}

static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
					struct cppi5_host_desc_t *desc,
					enum am65_cpsw_tx_buf_type buf_type)
{
	int desc_idx;

	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
					   tx_chn->dsize_log2);
	k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
					(void *)buf_type);
}

static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
							  dma_addr_t desc_dma)
{
	struct cppi5_host_desc_t *desc_tx;
	int desc_idx;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
					   tx_chn->dsize_log2);

	return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
								       desc_idx);
}
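
/* The pool's per-descriptor info word records whether a TX descriptor
 * carries an skb or an XDP frame, so the TX completion path can pick the
 * matching free routine (see am65_cpsw_nuss_tx_compl_packets()).
 */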

static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
				      struct page *page,
				      bool allow_direct)
{
	page_pool_put_full_page(flow->page_pool, page, allow_direct);
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct am65_cpsw_swdata *swdata;
	dma_addr_t buf_dma;
	struct page *page;
	u32 buf_dma_len;
	u32 flow_id;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	page = swdata->page;
	flow_id = swdata->flow_id;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
}

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
					   struct net_device *ndev,
					   unsigned int len)
{
	struct sk_buff *skb;

	len += AM65_CPSW_HEADROOM;

	skb = build_skb(page_addr, len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, AM65_CPSW_HEADROOM);
	skb->dev = ndev;

	return skb;
}
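
/* am65_cpsw_nuss_common_open()/_stop() are reference-counted through
 * common->usage_count by the per-port ndo_open/ndo_stop callbacks: only the
 * first open programs the shared switch core and DMA channels, and only the
 * last stop tears them down.
 */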
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int port_idx, i, ret, tx, flow_idx;
	struct am65_cpsw_rx_flow *flow;
	u32 val, port_mask;
	struct page *page;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN | AM65_CPSW_P0_REG_CTL_RX_REMAP_VLAN,
	       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistic */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	/* switch to vlan aware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	if (common->is_emac_mode)
		am65_cpsw_init_host_port_emac(common);
	else
		am65_cpsw_init_host_port_switch(common);

	am65_cpsw_qos_tx_p0_rate_init(common);

	ret = am65_cpsw_create_xdp_rxqs(common);
	if (ret) {
		dev_err(common->dev, "Failed to create XDP rx queues\n");
		return ret;
	}

	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
			page = page_pool_dev_alloc_pages(flow->page_pool);
			if (!page) {
				dev_err(common->dev, "cannot allocate page in flow %d\n",
					flow_idx);
				ret = -ENOMEM;
				goto fail_rx;
			}

			ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
			if (ret < 0) {
				dev_err(common->dev,
					"cannot submit page to rx channel flow %d, error %d\n",
					flow_idx, ret);
				am65_cpsw_put_page(flow, page, false);
				goto fail_rx;
			}
		}
	}

	ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
	if (ret) {
		dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
		goto fail_rx;
	}

	for (i = 0; i < common->rx_ch_num_flows; i++) {
		napi_enable(&rx_chn->flows[i].napi_rx);
		if (rx_chn->flows[i].irq_disabled) {
			rx_chn->flows[i].irq_disabled = false;
			enable_irq(rx_chn->flows[i].irq);
		}
	}

	for (tx = 0; tx < common->tx_ch_num; tx++) {
		ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
		if (ret) {
			dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
				tx, ret);
			goto fail_tx;
		}

		napi_enable(&tx_chn[tx].napi_tx);
	}

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;

fail_tx:
	while (tx--) {
		napi_disable(&tx_chn[tx].napi_tx);
		k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
	}

	for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
		flow = &rx_chn->flows[flow_idx];
		if (!flow->irq_disabled) {
			disable_irq(flow->irq);
			flow->irq_disabled = true;
		}
		napi_disable(&flow->napi_rx);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

fail_rx:
	for (i = 0; i < common->rx_ch_num_flows; i++)
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	am65_cpsw_destroy_xdp_rxqs(common);

	return ret;
}

static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++) {
		napi_disable(&tx_chn[i].napi_tx);
		hrtimer_cancel(&tx_chn[i].tx_hrtimer);
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
	}

	reinit_completion(&common->tdown_complete);
	k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);

	if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
		i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
		if (!i)
			dev_err(common->dev, "rx teardown timeout\n");
	}

	for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
		napi_disable(&rx_chn->flows[i].napi_rx);
		hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
		k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
					  am65_cpsw_nuss_rx_cleanup, !!i);
	}

	k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	am65_cpsw_destroy_xdp_rxqs(common);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	phylink_stop(port->slave.phylink);

	netif_tx_stop_all_queues(ndev);

	phylink_disconnect_phy(port->slave.phylink);

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct am65_cpsw_port *port = arg;

	if (!vdev || !vid)
		return 0;

	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret, i;
	u32 reg;

	ret = pm_runtime_resume_and_get(common->dev);
	if (ret < 0)
		return ret;

	/* Idle MAC port */
	cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
	cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);

	/* soft reset MAC */
	cpsw_sl_reg_write(port->slave.mac_sl, CPSW_SL_SOFT_RESET, 1);
	mdelay(1);
	reg = cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_SOFT_RESET);
	if (reg) {
		dev_err(common->dev, "soft RESET didn't complete\n");
		ret = -ETIMEDOUT;
		goto runtime_put;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		goto runtime_put;
	}

	ret = netif_set_real_num_rx_queues(ndev, common->rx_ch_num_flows);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		goto runtime_put;
	}

	for (i = 0; i < common->tx_ch_num; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, i);

		netdev_tx_reset_queue(txq);
		txq->tx_maxrate = common->tx_chns[i].rate_mbps;
	}

	ret = am65_cpsw_nuss_common_open(common);
	if (ret)
		goto runtime_put;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
	am65_cpsw_port_enable_dscp_map(port);

	if (common->is_emac_mode)
		am65_cpsw_init_port_emac_ale(port);
	else
		am65_cpsw_init_port_switch_ale(port);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
	if (ret)
		goto error_cleanup;

	/* restore vlan configurations */
	vlan_for_each(ndev, cpsw_restore_vlans, port);

	phylink_start(port->slave.phylink);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;

runtime_put:
	pm_runtime_put(common->dev);
	return ret;
}
*ndev
,
1049 struct am65_cpsw_tx_chn
*tx_chn
,
1050 struct xdp_frame
*xdpf
,
1051 enum am65_cpsw_tx_buf_type buf_type
)
1053 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
1054 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1055 struct cppi5_host_desc_t
*host_desc
;
1056 struct netdev_queue
*netif_txq
;
1057 dma_addr_t dma_desc
, dma_buf
;
1058 u32 pkt_len
= xdpf
->len
;
1062 host_desc
= k3_cppi_desc_pool_alloc(tx_chn
->desc_pool
);
1063 if (unlikely(!host_desc
)) {
1064 ndev
->stats
.tx_dropped
++;
1065 return AM65_CPSW_XDP_CONSUMED
; /* drop */
1068 am65_cpsw_nuss_set_buf_type(tx_chn
, host_desc
, buf_type
);
1070 dma_buf
= dma_map_single(tx_chn
->dma_dev
, xdpf
->data
,
1071 pkt_len
, DMA_TO_DEVICE
);
1072 if (unlikely(dma_mapping_error(tx_chn
->dma_dev
, dma_buf
))) {
1073 ndev
->stats
.tx_dropped
++;
1074 ret
= AM65_CPSW_XDP_CONSUMED
; /* drop */
1078 cppi5_hdesc_init(host_desc
, CPPI5_INFO0_HDESC_EPIB_PRESENT
,
1079 AM65_CPSW_NAV_PS_DATA_SIZE
);
1080 cppi5_hdesc_set_pkttype(host_desc
, AM65_CPSW_CPPI_TX_PKT_TYPE
);
1081 cppi5_hdesc_set_pktlen(host_desc
, pkt_len
);
1082 cppi5_desc_set_pktids(&host_desc
->hdr
, 0, AM65_CPSW_CPPI_TX_FLOW_ID
);
1083 cppi5_desc_set_tags_ids(&host_desc
->hdr
, 0, port
->port_id
);
1085 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn
->tx_chn
, &dma_buf
);
1086 cppi5_hdesc_attach_buf(host_desc
, dma_buf
, pkt_len
, dma_buf
, pkt_len
);
1088 swdata
= cppi5_hdesc_get_swdata(host_desc
);
1091 /* Report BQL before sending the packet */
1092 netif_txq
= netdev_get_tx_queue(ndev
, tx_chn
->id
);
1093 netdev_tx_sent_queue(netif_txq
, pkt_len
);
1095 dma_desc
= k3_cppi_desc_pool_virt2dma(tx_chn
->desc_pool
, host_desc
);
1096 if (AM65_CPSW_IS_CPSW2G(common
)) {
1097 ret
= k3_udma_glue_push_tx_chn(tx_chn
->tx_chn
, host_desc
,
1100 spin_lock_bh(&tx_chn
->lock
);
1101 ret
= k3_udma_glue_push_tx_chn(tx_chn
->tx_chn
, host_desc
,
1103 spin_unlock_bh(&tx_chn
->lock
);
1107 netdev_tx_completed_queue(netif_txq
, 1, pkt_len
);
1108 ndev
->stats
.tx_errors
++;
1109 ret
= AM65_CPSW_XDP_CONSUMED
; /* drop */
1116 k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn
->tx_chn
, &dma_buf
);
1117 dma_unmap_single(tx_chn
->dma_dev
, dma_buf
, pkt_len
, DMA_TO_DEVICE
);
1119 k3_cppi_desc_pool_free(tx_chn
->desc_pool
, host_desc
);
1123 static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow
*flow
,
1124 struct am65_cpsw_port
*port
,
1125 struct xdp_buff
*xdp
,
1128 struct am65_cpsw_common
*common
= flow
->common
;
1129 struct net_device
*ndev
= port
->ndev
;
1130 int ret
= AM65_CPSW_XDP_CONSUMED
;
1131 struct am65_cpsw_tx_chn
*tx_chn
;
1132 struct netdev_queue
*netif_txq
;
1133 struct xdp_frame
*xdpf
;
1134 struct bpf_prog
*prog
;
1139 prog
= READ_ONCE(port
->xdp_prog
);
1141 return AM65_CPSW_XDP_PASS
;
1143 act
= bpf_prog_run_xdp(prog
, xdp
);
1144 /* XDP prog might have changed packet data and boundaries */
1145 *len
= xdp
->data_end
- xdp
->data
;
1149 ret
= AM65_CPSW_XDP_PASS
;
1152 tx_chn
= &common
->tx_chns
[cpu
% AM65_CPSW_MAX_QUEUES
];
1153 netif_txq
= netdev_get_tx_queue(ndev
, tx_chn
->id
);
1155 xdpf
= xdp_convert_buff_to_frame(xdp
);
1156 if (unlikely(!xdpf
))
1159 __netif_tx_lock(netif_txq
, cpu
);
1160 err
= am65_cpsw_xdp_tx_frame(ndev
, tx_chn
, xdpf
,
1161 AM65_CPSW_TX_BUF_TYPE_XDP_TX
);
1162 __netif_tx_unlock(netif_txq
);
1166 dev_sw_netstats_tx_add(ndev
, 1, *len
);
1167 ret
= AM65_CPSW_XDP_CONSUMED
;
1170 if (unlikely(xdp_do_redirect(ndev
, xdp
, prog
)))
1173 dev_sw_netstats_rx_add(ndev
, *len
);
1174 ret
= AM65_CPSW_XDP_REDIRECT
;
1177 bpf_warn_invalid_xdp_action(ndev
, prog
, act
);
1181 trace_xdp_exception(ndev
, prog
, act
);
1184 ndev
->stats
.rx_dropped
++;
1187 page
= virt_to_head_page(xdp
->data
);
1188 am65_cpsw_put_page(flow
, page
, true);
/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
	 * The csum information is provided in the psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicate IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
1227 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow
*flow
,
1228 int cpu
, int *xdp_state
)
1230 struct am65_cpsw_rx_chn
*rx_chn
= &flow
->common
->rx_chns
;
1231 u32 buf_dma_len
, pkt_len
, port_id
= 0, csum_info
;
1232 struct am65_cpsw_common
*common
= flow
->common
;
1233 struct am65_cpsw_ndev_priv
*ndev_priv
;
1234 struct cppi5_host_desc_t
*desc_rx
;
1235 struct device
*dev
= common
->dev
;
1236 struct am65_cpsw_swdata
*swdata
;
1237 struct page
*page
, *new_page
;
1238 dma_addr_t desc_dma
, buf_dma
;
1239 struct am65_cpsw_port
*port
;
1240 struct net_device
*ndev
;
1241 u32 flow_idx
= flow
->id
;
1242 struct sk_buff
*skb
;
1243 struct xdp_buff xdp
;
1248 *xdp_state
= AM65_CPSW_XDP_PASS
;
1249 ret
= k3_udma_glue_pop_rx_chn(rx_chn
->rx_chn
, flow_idx
, &desc_dma
);
1251 if (ret
!= -ENODATA
)
1252 dev_err(dev
, "RX: pop chn fail %d\n", ret
);
1256 if (cppi5_desc_is_tdcm(desc_dma
)) {
1257 dev_dbg(dev
, "%s RX tdown flow: %u\n", __func__
, flow_idx
);
1258 if (common
->pdata
.quirks
& AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ
)
1259 complete(&common
->tdown_complete
);
1263 desc_rx
= k3_cppi_desc_pool_dma2virt(rx_chn
->desc_pool
, desc_dma
);
1264 dev_dbg(dev
, "%s flow_idx: %u desc %pad\n",
1265 __func__
, flow_idx
, &desc_dma
);
1267 swdata
= cppi5_hdesc_get_swdata(desc_rx
);
1268 page
= swdata
->page
;
1269 page_addr
= page_address(page
);
1270 cppi5_hdesc_get_obuf(desc_rx
, &buf_dma
, &buf_dma_len
);
1271 k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn
->rx_chn
, &buf_dma
);
1272 pkt_len
= cppi5_hdesc_get_pktlen(desc_rx
);
1273 cppi5_desc_get_tags_ids(&desc_rx
->hdr
, &port_id
, NULL
);
1274 dev_dbg(dev
, "%s rx port_id:%d\n", __func__
, port_id
);
1275 port
= am65_common_get_port(common
, port_id
);
1277 psdata
= cppi5_hdesc_get_psdata(desc_rx
);
1278 csum_info
= psdata
[2];
1279 dev_dbg(dev
, "%s rx csum_info:%#x\n", __func__
, csum_info
);
1281 dma_unmap_single(rx_chn
->dma_dev
, buf_dma
, buf_dma_len
, DMA_FROM_DEVICE
);
1283 k3_cppi_desc_pool_free(rx_chn
->desc_pool
, desc_rx
);
1285 skb
= am65_cpsw_build_skb(page_addr
, ndev
,
1286 AM65_CPSW_MAX_PACKET_SIZE
);
1287 if (unlikely(!skb
)) {
1292 if (port
->xdp_prog
) {
1293 xdp_init_buff(&xdp
, PAGE_SIZE
, &port
->xdp_rxq
[flow
->id
]);
1294 xdp_prepare_buff(&xdp
, page_addr
, AM65_CPSW_HEADROOM
,
1296 *xdp_state
= am65_cpsw_run_xdp(flow
, port
, &xdp
,
1298 if (*xdp_state
!= AM65_CPSW_XDP_PASS
)
1301 /* Compute additional headroom to be reserved */
1302 headroom
= (xdp
.data
- xdp
.data_hard_start
) - skb_headroom(skb
);
1303 skb_reserve(skb
, headroom
);
1306 ndev_priv
= netdev_priv(ndev
);
1307 am65_cpsw_nuss_set_offload_fwd_mark(skb
, ndev_priv
->offload_fwd_mark
);
1308 skb_put(skb
, pkt_len
);
1309 if (port
->rx_ts_enabled
)
1310 am65_cpts_rx_timestamp(common
->cpts
, skb
);
1311 skb_mark_for_recycle(skb
);
1312 skb
->protocol
= eth_type_trans(skb
, ndev
);
1313 am65_cpsw_nuss_rx_csum(skb
, csum_info
);
1314 napi_gro_receive(&flow
->napi_rx
, skb
);
1316 dev_sw_netstats_rx_add(ndev
, pkt_len
);
1319 new_page
= page_pool_dev_alloc_pages(flow
->page_pool
);
1320 if (unlikely(!new_page
)) {
1321 dev_err(dev
, "page alloc failed\n");
1325 if (netif_dormant(ndev
)) {
1326 am65_cpsw_put_page(flow
, new_page
, true);
1327 ndev
->stats
.rx_dropped
++;
1332 ret
= am65_cpsw_nuss_rx_push(common
, new_page
, flow_idx
);
1333 if (WARN_ON(ret
< 0)) {
1334 am65_cpsw_put_page(flow
, new_page
, true);
1335 ndev
->stats
.rx_errors
++;
1336 ndev
->stats
.rx_dropped
++;
1342 static enum hrtimer_restart
am65_cpsw_nuss_rx_timer_callback(struct hrtimer
*timer
)
1344 struct am65_cpsw_rx_flow
*flow
= container_of(timer
,
1345 struct am65_cpsw_rx_flow
,
1348 enable_irq(flow
->irq
);
1349 return HRTIMER_NORESTART
;
1352 static int am65_cpsw_nuss_rx_poll(struct napi_struct
*napi_rx
, int budget
)
1354 struct am65_cpsw_rx_flow
*flow
= am65_cpsw_napi_to_rx_flow(napi_rx
);
1355 struct am65_cpsw_common
*common
= flow
->common
;
1356 int cpu
= smp_processor_id();
1357 int xdp_state_or
= 0;
1358 int cur_budget
, ret
;
1362 /* process only this flow */
1363 cur_budget
= budget
;
1364 while (cur_budget
--) {
1365 ret
= am65_cpsw_nuss_rx_packets(flow
, cpu
, &xdp_state
);
1366 xdp_state_or
|= xdp_state
;
1372 if (xdp_state_or
& AM65_CPSW_XDP_REDIRECT
)
1375 dev_dbg(common
->dev
, "%s num_rx:%d %d\n", __func__
, num_rx
, budget
);
1377 if (num_rx
< budget
&& napi_complete_done(napi_rx
, num_rx
)) {
1378 if (flow
->irq_disabled
) {
1379 flow
->irq_disabled
= false;
1380 if (unlikely(flow
->rx_pace_timeout
)) {
1381 hrtimer_start(&flow
->rx_hrtimer
,
1382 ns_to_ktime(flow
->rx_pace_timeout
),
1383 HRTIMER_MODE_REL_PINNED
);
1385 enable_irq(flow
->irq
);
1393 static struct sk_buff
*
1394 am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn
*tx_chn
,
1395 dma_addr_t desc_dma
)
1397 struct cppi5_host_desc_t
*desc_tx
;
1398 struct sk_buff
*skb
;
1401 desc_tx
= k3_cppi_desc_pool_dma2virt(tx_chn
->desc_pool
,
1403 swdata
= cppi5_hdesc_get_swdata(desc_tx
);
1405 am65_cpsw_nuss_xmit_free(tx_chn
, desc_tx
);
1407 am65_cpts_tx_timestamp(tx_chn
->common
->cpts
, skb
);
1409 dev_sw_netstats_tx_add(skb
->dev
, 1, skb
->len
);
1414 static struct xdp_frame
*
1415 am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common
*common
,
1416 struct am65_cpsw_tx_chn
*tx_chn
,
1417 dma_addr_t desc_dma
,
1418 struct net_device
**ndev
)
1420 struct cppi5_host_desc_t
*desc_tx
;
1421 struct am65_cpsw_port
*port
;
1422 struct xdp_frame
*xdpf
;
1426 desc_tx
= k3_cppi_desc_pool_dma2virt(tx_chn
->desc_pool
, desc_dma
);
1427 cppi5_desc_get_tags_ids(&desc_tx
->hdr
, NULL
, &port_id
);
1428 swdata
= cppi5_hdesc_get_swdata(desc_tx
);
1430 am65_cpsw_nuss_xmit_free(tx_chn
, desc_tx
);
1432 port
= am65_common_get_port(common
, port_id
);
1433 dev_sw_netstats_tx_add(port
->ndev
, 1, xdpf
->len
);
1439 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn
*tx_chn
, struct net_device
*ndev
,
1440 struct netdev_queue
*netif_txq
)
1442 if (netif_tx_queue_stopped(netif_txq
)) {
1443 /* Check whether the queue is stopped due to stalled
1444 * tx dma, if the queue is stopped then wake the queue
1445 * as we have free desc for tx
1447 __netif_tx_lock(netif_txq
, smp_processor_id());
1448 if (netif_running(ndev
) &&
1449 (k3_cppi_desc_pool_avail(tx_chn
->desc_pool
) >= MAX_SKB_FRAGS
))
1450 netif_tx_wake_queue(netif_txq
);
1452 __netif_tx_unlock(netif_txq
);
1456 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common
*common
,
1457 int chn
, unsigned int budget
, bool *tdown
)
1459 enum am65_cpsw_tx_buf_type buf_type
;
1460 struct device
*dev
= common
->dev
;
1461 struct am65_cpsw_tx_chn
*tx_chn
;
1462 struct netdev_queue
*netif_txq
;
1463 unsigned int total_bytes
= 0;
1464 struct net_device
*ndev
;
1465 struct xdp_frame
*xdpf
;
1466 struct sk_buff
*skb
;
1467 dma_addr_t desc_dma
;
1468 int res
, num_tx
= 0;
1470 tx_chn
= &common
->tx_chns
[chn
];
1473 spin_lock(&tx_chn
->lock
);
1474 res
= k3_udma_glue_pop_tx_chn(tx_chn
->tx_chn
, &desc_dma
);
1475 spin_unlock(&tx_chn
->lock
);
1476 if (res
== -ENODATA
)
1479 if (cppi5_desc_is_tdcm(desc_dma
)) {
1480 if (atomic_dec_and_test(&common
->tdown_cnt
))
1481 complete(&common
->tdown_complete
);
1486 buf_type
= am65_cpsw_nuss_buf_type(tx_chn
, desc_dma
);
1487 if (buf_type
== AM65_CPSW_TX_BUF_TYPE_SKB
) {
1488 skb
= am65_cpsw_nuss_tx_compl_packet_skb(tx_chn
, desc_dma
);
1490 total_bytes
= skb
->len
;
1491 napi_consume_skb(skb
, budget
);
1493 xdpf
= am65_cpsw_nuss_tx_compl_packet_xdp(common
, tx_chn
,
1495 total_bytes
= xdpf
->len
;
1496 if (buf_type
== AM65_CPSW_TX_BUF_TYPE_XDP_TX
)
1497 xdp_return_frame_rx_napi(xdpf
);
1499 xdp_return_frame(xdpf
);
1503 netif_txq
= netdev_get_tx_queue(ndev
, chn
);
1505 netdev_tx_completed_queue(netif_txq
, num_tx
, total_bytes
);
1507 am65_cpsw_nuss_tx_wake(tx_chn
, ndev
, netif_txq
);
1510 dev_dbg(dev
, "%s:%u pkt:%d\n", __func__
, chn
, num_tx
);
1515 static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common
*common
,
1516 int chn
, unsigned int budget
, bool *tdown
)
1518 enum am65_cpsw_tx_buf_type buf_type
;
1519 struct device
*dev
= common
->dev
;
1520 struct am65_cpsw_tx_chn
*tx_chn
;
1521 struct netdev_queue
*netif_txq
;
1522 unsigned int total_bytes
= 0;
1523 struct net_device
*ndev
;
1524 struct xdp_frame
*xdpf
;
1525 struct sk_buff
*skb
;
1526 dma_addr_t desc_dma
;
1527 int res
, num_tx
= 0;
1529 tx_chn
= &common
->tx_chns
[chn
];
1532 res
= k3_udma_glue_pop_tx_chn(tx_chn
->tx_chn
, &desc_dma
);
1533 if (res
== -ENODATA
)
1536 if (cppi5_desc_is_tdcm(desc_dma
)) {
1537 if (atomic_dec_and_test(&common
->tdown_cnt
))
1538 complete(&common
->tdown_complete
);
1543 buf_type
= am65_cpsw_nuss_buf_type(tx_chn
, desc_dma
);
1544 if (buf_type
== AM65_CPSW_TX_BUF_TYPE_SKB
) {
1545 skb
= am65_cpsw_nuss_tx_compl_packet_skb(tx_chn
, desc_dma
);
1547 total_bytes
+= skb
->len
;
1548 napi_consume_skb(skb
, budget
);
1550 xdpf
= am65_cpsw_nuss_tx_compl_packet_xdp(common
, tx_chn
,
1552 total_bytes
+= xdpf
->len
;
1553 if (buf_type
== AM65_CPSW_TX_BUF_TYPE_XDP_TX
)
1554 xdp_return_frame_rx_napi(xdpf
);
1556 xdp_return_frame(xdpf
);
1564 netif_txq
= netdev_get_tx_queue(ndev
, chn
);
1566 netdev_tx_completed_queue(netif_txq
, num_tx
, total_bytes
);
1568 am65_cpsw_nuss_tx_wake(tx_chn
, ndev
, netif_txq
);
1570 dev_dbg(dev
, "%s:%u pkt:%d\n", __func__
, chn
, num_tx
);
1575 static enum hrtimer_restart
am65_cpsw_nuss_tx_timer_callback(struct hrtimer
*timer
)
1577 struct am65_cpsw_tx_chn
*tx_chns
=
1578 container_of(timer
, struct am65_cpsw_tx_chn
, tx_hrtimer
);
1580 enable_irq(tx_chns
->irq
);
1581 return HRTIMER_NORESTART
;
1584 static int am65_cpsw_nuss_tx_poll(struct napi_struct
*napi_tx
, int budget
)
1586 struct am65_cpsw_tx_chn
*tx_chn
= am65_cpsw_napi_to_tx_chn(napi_tx
);
1590 if (AM65_CPSW_IS_CPSW2G(tx_chn
->common
))
1591 num_tx
= am65_cpsw_nuss_tx_compl_packets_2g(tx_chn
->common
, tx_chn
->id
,
1594 num_tx
= am65_cpsw_nuss_tx_compl_packets(tx_chn
->common
,
1595 tx_chn
->id
, budget
, &tdown
);
1597 if (num_tx
>= budget
)
1600 if (napi_complete_done(napi_tx
, num_tx
)) {
1601 if (unlikely(tx_chn
->tx_pace_timeout
&& !tdown
)) {
1602 hrtimer_start(&tx_chn
->tx_hrtimer
,
1603 ns_to_ktime(tx_chn
->tx_pace_timeout
),
1604 HRTIMER_MODE_REL_PINNED
);
1606 enable_irq(tx_chn
->irq
);
1613 static irqreturn_t
am65_cpsw_nuss_rx_irq(int irq
, void *dev_id
)
1615 struct am65_cpsw_rx_flow
*flow
= dev_id
;
1617 flow
->irq_disabled
= true;
1618 disable_irq_nosync(irq
);
1619 napi_schedule(&flow
->napi_rx
);
1624 static irqreturn_t
am65_cpsw_nuss_tx_irq(int irq
, void *dev_id
)
1626 struct am65_cpsw_tx_chn
*tx_chn
= dev_id
;
1628 disable_irq_nosync(irq
);
1629 napi_schedule(&tx_chn
->napi_tx
);
1634 static netdev_tx_t
am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff
*skb
,
1635 struct net_device
*ndev
)
1637 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
1638 struct cppi5_host_desc_t
*first_desc
, *next_desc
, *cur_desc
;
1639 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1640 struct device
*dev
= common
->dev
;
1641 struct am65_cpsw_tx_chn
*tx_chn
;
1642 struct netdev_queue
*netif_txq
;
1643 dma_addr_t desc_dma
, buf_dma
;
1649 /* padding enabled in hw */
1650 pkt_len
= skb_headlen(skb
);
1652 /* SKB TX timestamp */
1653 if (port
->tx_ts_enabled
)
1654 am65_cpts_prep_tx_timestamp(common
->cpts
, skb
);
1656 q_idx
= skb_get_queue_mapping(skb
);
1657 dev_dbg(dev
, "%s skb_queue:%d\n", __func__
, q_idx
);
1659 tx_chn
= &common
->tx_chns
[q_idx
];
1660 netif_txq
= netdev_get_tx_queue(ndev
, q_idx
);
1662 /* Map the linear buffer */
1663 buf_dma
= dma_map_single(tx_chn
->dma_dev
, skb
->data
, pkt_len
,
1665 if (unlikely(dma_mapping_error(tx_chn
->dma_dev
, buf_dma
))) {
1666 dev_err(dev
, "Failed to map tx skb buffer\n");
1667 ndev
->stats
.tx_errors
++;
1671 first_desc
= k3_cppi_desc_pool_alloc(tx_chn
->desc_pool
);
1673 dev_dbg(dev
, "Failed to allocate descriptor\n");
1674 dma_unmap_single(tx_chn
->dma_dev
, buf_dma
, pkt_len
,
1679 am65_cpsw_nuss_set_buf_type(tx_chn
, first_desc
,
1680 AM65_CPSW_TX_BUF_TYPE_SKB
);
1682 cppi5_hdesc_init(first_desc
, CPPI5_INFO0_HDESC_EPIB_PRESENT
,
1683 AM65_CPSW_NAV_PS_DATA_SIZE
);
1684 cppi5_desc_set_pktids(&first_desc
->hdr
, 0, AM65_CPSW_CPPI_TX_FLOW_ID
);
1685 cppi5_hdesc_set_pkttype(first_desc
, AM65_CPSW_CPPI_TX_PKT_TYPE
);
1686 cppi5_desc_set_tags_ids(&first_desc
->hdr
, 0, port
->port_id
);
1688 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn
->tx_chn
, &buf_dma
);
1689 cppi5_hdesc_attach_buf(first_desc
, buf_dma
, pkt_len
, buf_dma
, pkt_len
);
1690 swdata
= cppi5_hdesc_get_swdata(first_desc
);
1692 psdata
= cppi5_hdesc_get_psdata(first_desc
);
1694 /* HW csum offload if enabled */
1696 if (likely(skb
->ip_summed
== CHECKSUM_PARTIAL
)) {
1697 unsigned int cs_start
, cs_offset
;
1699 cs_start
= skb_transport_offset(skb
);
1700 cs_offset
= cs_start
+ skb
->csum_offset
;
1701 /* HW numerates bytes starting from 1 */
1702 psdata
[2] = ((cs_offset
+ 1) << 24) |
1703 ((cs_start
+ 1) << 16) | (skb
->len
- cs_start
);
1704 dev_dbg(dev
, "%s tx psdata:%#x\n", __func__
, psdata
[2]);
1707 if (!skb_is_nonlinear(skb
))
1710 dev_dbg(dev
, "fragmented SKB\n");
1712 /* Handle the case where skb is fragmented in pages */
1713 cur_desc
= first_desc
;
1714 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1715 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1716 u32 frag_size
= skb_frag_size(frag
);
1718 next_desc
= k3_cppi_desc_pool_alloc(tx_chn
->desc_pool
);
1720 dev_err(dev
, "Failed to allocate descriptor\n");
1721 goto busy_free_descs
;
1724 am65_cpsw_nuss_set_buf_type(tx_chn
, next_desc
,
1725 AM65_CPSW_TX_BUF_TYPE_SKB
);
1727 buf_dma
= skb_frag_dma_map(tx_chn
->dma_dev
, frag
, 0, frag_size
,
1729 if (unlikely(dma_mapping_error(tx_chn
->dma_dev
, buf_dma
))) {
1730 dev_err(dev
, "Failed to map tx skb page\n");
1731 k3_cppi_desc_pool_free(tx_chn
->desc_pool
, next_desc
);
1732 ndev
->stats
.tx_errors
++;
1733 goto err_free_descs
;
1736 cppi5_hdesc_reset_hbdesc(next_desc
);
1737 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn
->tx_chn
, &buf_dma
);
1738 cppi5_hdesc_attach_buf(next_desc
,
1739 buf_dma
, frag_size
, buf_dma
, frag_size
);
1741 desc_dma
= k3_cppi_desc_pool_virt2dma(tx_chn
->desc_pool
,
1743 k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn
->tx_chn
, &desc_dma
);
1744 cppi5_hdesc_link_hbdesc(cur_desc
, desc_dma
);
1746 pkt_len
+= frag_size
;
1747 cur_desc
= next_desc
;
1749 WARN_ON(pkt_len
!= skb
->len
);
1752 skb_tx_timestamp(skb
);
1754 /* report bql before sending packet */
1755 netdev_tx_sent_queue(netif_txq
, pkt_len
);
1757 cppi5_hdesc_set_pktlen(first_desc
, pkt_len
);
1758 desc_dma
= k3_cppi_desc_pool_virt2dma(tx_chn
->desc_pool
, first_desc
);
1759 if (AM65_CPSW_IS_CPSW2G(common
)) {
1760 ret
= k3_udma_glue_push_tx_chn(tx_chn
->tx_chn
, first_desc
, desc_dma
);
1762 spin_lock_bh(&tx_chn
->lock
);
1763 ret
= k3_udma_glue_push_tx_chn(tx_chn
->tx_chn
, first_desc
, desc_dma
);
1764 spin_unlock_bh(&tx_chn
->lock
);
1767 dev_err(dev
, "can't push desc %d\n", ret
);
1769 netdev_tx_completed_queue(netif_txq
, 1, pkt_len
);
1770 ndev
->stats
.tx_errors
++;
1771 goto err_free_descs
;
1774 if (k3_cppi_desc_pool_avail(tx_chn
->desc_pool
) < MAX_SKB_FRAGS
) {
1775 netif_tx_stop_queue(netif_txq
);
1776 /* Barrier, so that stop_queue visible to other cpus */
1777 smp_mb__after_atomic();
1778 dev_dbg(dev
, "netif_tx_stop_queue %d\n", q_idx
);
1780 /* re-check for smp */
1781 if (k3_cppi_desc_pool_avail(tx_chn
->desc_pool
) >=
1783 netif_tx_wake_queue(netif_txq
);
1784 dev_dbg(dev
, "netif_tx_wake_queue %d\n", q_idx
);
1788 return NETDEV_TX_OK
;
1791 am65_cpsw_nuss_xmit_free(tx_chn
, first_desc
);
1793 ndev
->stats
.tx_dropped
++;
1794 dev_kfree_skb_any(skb
);
1795 return NETDEV_TX_OK
;
1798 am65_cpsw_nuss_xmit_free(tx_chn
, first_desc
);
1800 netif_tx_stop_queue(netif_txq
);
1801 return NETDEV_TX_BUSY
;
1804 static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device
*ndev
,
1807 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
1808 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1809 struct sockaddr
*sockaddr
= (struct sockaddr
*)addr
;
1812 ret
= eth_prepare_mac_addr_change(ndev
, addr
);
1816 ret
= pm_runtime_resume_and_get(common
->dev
);
1820 cpsw_ale_del_ucast(common
->ale
, ndev
->dev_addr
,
1821 HOST_PORT_NUM
, 0, 0);
1822 cpsw_ale_add_ucast(common
->ale
, sockaddr
->sa_data
,
1823 HOST_PORT_NUM
, ALE_SECURE
, 0);
1825 am65_cpsw_port_set_sl_mac(port
, addr
);
1826 eth_commit_mac_addr_change(ndev
, sockaddr
);
1828 pm_runtime_put(common
->dev
);
1833 static int am65_cpsw_nuss_hwtstamp_set(struct net_device
*ndev
,
1836 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1837 u32 ts_ctrl
, seq_id
, ts_ctrl_ltype2
, ts_vlan_ltype
;
1838 struct hwtstamp_config cfg
;
1840 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS
))
1843 if (copy_from_user(&cfg
, ifr
->ifr_data
, sizeof(cfg
)))
1846 /* TX HW timestamp */
1847 switch (cfg
.tx_type
) {
1848 case HWTSTAMP_TX_OFF
:
1849 case HWTSTAMP_TX_ON
:
1855 switch (cfg
.rx_filter
) {
1856 case HWTSTAMP_FILTER_NONE
:
1857 port
->rx_ts_enabled
= false;
1859 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
1860 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
1861 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
1862 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
1863 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
1864 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
1865 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
1866 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
1867 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
1868 port
->rx_ts_enabled
= true;
1869 cfg
.rx_filter
= HWTSTAMP_FILTER_PTP_V2_EVENT
;
1871 case HWTSTAMP_FILTER_ALL
:
1872 case HWTSTAMP_FILTER_SOME
:
1873 case HWTSTAMP_FILTER_NTP_ALL
:
1879 port
->tx_ts_enabled
= (cfg
.tx_type
== HWTSTAMP_TX_ON
);
1881 /* cfg TX timestamp */
1882 seq_id
= (AM65_CPSW_TS_SEQ_ID_OFFSET
<<
1883 AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT
) | ETH_P_1588
;
1885 ts_vlan_ltype
= ETH_P_8021Q
;
1887 ts_ctrl_ltype2
= ETH_P_1588
|
1888 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107
|
1889 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129
|
1890 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130
|
1891 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131
|
1892 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132
|
1893 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319
|
1894 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320
|
1895 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO
;
1897 ts_ctrl
= AM65_CPSW_TS_EVENT_MSG_TYPE_BITS
<<
1898 AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT
;
1900 if (port
->tx_ts_enabled
)
1901 ts_ctrl
|= AM65_CPSW_TS_TX_ANX_ALL_EN
|
1902 AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN
;
1904 if (port
->rx_ts_enabled
)
1905 ts_ctrl
|= AM65_CPSW_TS_RX_ANX_ALL_EN
|
1906 AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN
;
1908 writel(seq_id
, port
->port_base
+ AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG
);
1909 writel(ts_vlan_ltype
, port
->port_base
+
1910 AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG
);
1911 writel(ts_ctrl_ltype2
, port
->port_base
+
1912 AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2
);
1913 writel(ts_ctrl
, port
->port_base
+ AM65_CPSW_PORTN_REG_TS_CTL
);
1915 return copy_to_user(ifr
->ifr_data
, &cfg
, sizeof(cfg
)) ? -EFAULT
: 0;
1918 static int am65_cpsw_nuss_hwtstamp_get(struct net_device
*ndev
,
1921 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1922 struct hwtstamp_config cfg
;
1924 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS
))
1928 cfg
.tx_type
= port
->tx_ts_enabled
?
1929 HWTSTAMP_TX_ON
: HWTSTAMP_TX_OFF
;
1930 cfg
.rx_filter
= port
->rx_ts_enabled
?
1931 HWTSTAMP_FILTER_PTP_V2_EVENT
: HWTSTAMP_FILTER_NONE
;
1933 return copy_to_user(ifr
->ifr_data
, &cfg
, sizeof(cfg
)) ? -EFAULT
: 0;
1936 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device
*ndev
,
1937 struct ifreq
*req
, int cmd
)
1939 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1941 if (!netif_running(ndev
))
1946 return am65_cpsw_nuss_hwtstamp_set(ndev
, req
);
1948 return am65_cpsw_nuss_hwtstamp_get(ndev
, req
);
1951 return phylink_mii_ioctl(port
->slave
.phylink
, req
, cmd
);
1954 static void am65_cpsw_nuss_ndo_get_stats(struct net_device
*dev
,
1955 struct rtnl_link_stats64
*stats
)
1957 dev_fetch_sw_netstats(stats
, dev
->tstats
);
1959 stats
->rx_errors
= dev
->stats
.rx_errors
;
1960 stats
->rx_dropped
= dev
->stats
.rx_dropped
;
1961 stats
->tx_dropped
= dev
->stats
.tx_dropped
;
1964 static int am65_cpsw_xdp_prog_setup(struct net_device
*ndev
,
1965 struct bpf_prog
*prog
)
1967 struct am65_cpsw_port
*port
= am65_ndev_to_port(ndev
);
1968 bool running
= netif_running(ndev
);
1969 struct bpf_prog
*old_prog
;
1972 am65_cpsw_nuss_ndo_slave_stop(ndev
);
1974 old_prog
= xchg(&port
->xdp_prog
, prog
);
1976 bpf_prog_put(old_prog
);
1979 return am65_cpsw_nuss_ndo_slave_open(ndev
);
1984 static int am65_cpsw_ndo_bpf(struct net_device
*ndev
, struct netdev_bpf
*bpf
)
1986 switch (bpf
->command
) {
1987 case XDP_SETUP_PROG
:
1988 return am65_cpsw_xdp_prog_setup(ndev
, bpf
->prog
);
1994 static int am65_cpsw_ndo_xdp_xmit(struct net_device
*ndev
, int n
,
1995 struct xdp_frame
**frames
, u32 flags
)
1997 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
1998 struct am65_cpsw_tx_chn
*tx_chn
;
1999 struct netdev_queue
*netif_txq
;
2000 int cpu
= smp_processor_id();
2003 tx_chn
= &common
->tx_chns
[cpu
% common
->tx_ch_num
];
2004 netif_txq
= netdev_get_tx_queue(ndev
, tx_chn
->id
);
2006 __netif_tx_lock(netif_txq
, cpu
);
2007 for (i
= 0; i
< n
; i
++) {
2008 if (am65_cpsw_xdp_tx_frame(ndev
, tx_chn
, frames
[i
],
2009 AM65_CPSW_TX_BUF_TYPE_XDP_NDO
))
2013 __netif_tx_unlock(netif_txq
);
2018 static const struct net_device_ops am65_cpsw_nuss_netdev_ops
= {
2019 .ndo_open
= am65_cpsw_nuss_ndo_slave_open
,
2020 .ndo_stop
= am65_cpsw_nuss_ndo_slave_stop
,
2021 .ndo_start_xmit
= am65_cpsw_nuss_ndo_slave_xmit
,
2022 .ndo_set_rx_mode
= am65_cpsw_nuss_ndo_slave_set_rx_mode
,
2023 .ndo_get_stats64
= am65_cpsw_nuss_ndo_get_stats
,
2024 .ndo_validate_addr
= eth_validate_addr
,
2025 .ndo_set_mac_address
= am65_cpsw_nuss_ndo_slave_set_mac_address
,
2026 .ndo_tx_timeout
= am65_cpsw_nuss_ndo_host_tx_timeout
,
2027 .ndo_vlan_rx_add_vid
= am65_cpsw_nuss_ndo_slave_add_vid
,
2028 .ndo_vlan_rx_kill_vid
= am65_cpsw_nuss_ndo_slave_kill_vid
,
2029 .ndo_eth_ioctl
= am65_cpsw_nuss_ndo_slave_ioctl
,
2030 .ndo_setup_tc
= am65_cpsw_qos_ndo_setup_tc
,
2031 .ndo_set_tx_maxrate
= am65_cpsw_qos_ndo_tx_p0_set_maxrate
,
2032 .ndo_bpf
= am65_cpsw_ndo_bpf
,
2033 .ndo_xdp_xmit
= am65_cpsw_ndo_xdp_xmit
,
2036 static void am65_cpsw_disable_phy(struct phy
*phy
)
2042 static int am65_cpsw_enable_phy(struct phy
*phy
)
2046 ret
= phy_init(phy
);
2050 ret
= phy_power_on(phy
);
2059 static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common
*common
)
2061 struct am65_cpsw_port
*port
;
2065 for (i
= 0; i
< common
->port_num
; i
++) {
2066 port
= &common
->ports
[i
];
2067 phy
= port
->slave
.serdes_phy
;
2069 am65_cpsw_disable_phy(phy
);
2073 static int am65_cpsw_init_serdes_phy(struct device
*dev
, struct device_node
*port_np
,
2074 struct am65_cpsw_port
*port
)
2076 const char *name
= "serdes";
2080 phy
= devm_of_phy_optional_get(dev
, port_np
, name
);
2081 if (IS_ERR_OR_NULL(phy
))
2082 return PTR_ERR_OR_ZERO(phy
);
2084 /* Serdes PHY exists. Store it. */
2085 port
->slave
.serdes_phy
= phy
;
2087 ret
= am65_cpsw_enable_phy(phy
);
2094 devm_phy_put(dev
, phy
);
2098 static void am65_cpsw_nuss_mac_config(struct phylink_config
*config
, unsigned int mode
,
2099 const struct phylink_link_state
*state
)
2101 struct am65_cpsw_slave_data
*slave
= container_of(config
, struct am65_cpsw_slave_data
,
2103 struct am65_cpsw_port
*port
= container_of(slave
, struct am65_cpsw_port
, slave
);
2104 struct am65_cpsw_common
*common
= port
->common
;
2106 if (common
->pdata
.extra_modes
& BIT(state
->interface
)) {
2107 if (state
->interface
== PHY_INTERFACE_MODE_SGMII
) {
2108 writel(ADVERTISE_SGMII
,
2109 port
->sgmii_base
+ AM65_CPSW_SGMII_MR_ADV_ABILITY_REG
);
2110 cpsw_sl_ctl_set(port
->slave
.mac_sl
, CPSW_SL_CTL_EXT_EN
);
2112 cpsw_sl_ctl_clr(port
->slave
.mac_sl
, CPSW_SL_CTL_EXT_EN
);
2115 if (state
->interface
== PHY_INTERFACE_MODE_USXGMII
) {
2116 cpsw_sl_ctl_set(port
->slave
.mac_sl
,
2117 CPSW_SL_CTL_XGIG
| CPSW_SL_CTL_XGMII_EN
);
2119 cpsw_sl_ctl_clr(port
->slave
.mac_sl
,
2120 CPSW_SL_CTL_XGIG
| CPSW_SL_CTL_XGMII_EN
);
2123 writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE
,
2124 port
->sgmii_base
+ AM65_CPSW_SGMII_CONTROL_REG
);
2128 static void am65_cpsw_nuss_mac_link_down(struct phylink_config
*config
, unsigned int mode
,
2129 phy_interface_t interface
)
2131 struct am65_cpsw_slave_data
*slave
= container_of(config
, struct am65_cpsw_slave_data
,
2133 struct am65_cpsw_port
*port
= container_of(slave
, struct am65_cpsw_port
, slave
);
2134 struct am65_cpsw_common
*common
= port
->common
;
2135 struct net_device
*ndev
= port
->ndev
;
2139 /* disable forwarding */
2140 cpsw_ale_control_set(common
->ale
, port
->port_id
, ALE_PORT_STATE
, ALE_PORT_STATE_DISABLE
);
2142 cpsw_sl_ctl_set(port
->slave
.mac_sl
, CPSW_SL_CTL_CMD_IDLE
);
2144 tmo
= cpsw_sl_wait_for_idle(port
->slave
.mac_sl
, 100);
2145 dev_dbg(common
->dev
, "down msc_sl %08x tmo %d\n",
2146 cpsw_sl_reg_read(port
->slave
.mac_sl
, CPSW_SL_MACSTATUS
), tmo
);
2148 /* All the bits that am65_cpsw_nuss_mac_link_up() can possibly set */
2149 mac_control
= CPSW_SL_CTL_GMII_EN
| CPSW_SL_CTL_GIG
| CPSW_SL_CTL_IFCTL_A
|
2150 CPSW_SL_CTL_FULLDUPLEX
| CPSW_SL_CTL_RX_FLOW_EN
| CPSW_SL_CTL_TX_FLOW_EN
;
2151 /* If interface mode is RGMII, CPSW_SL_CTL_EXT_EN might have been set for 10 Mbps */
2152 if (phy_interface_mode_is_rgmii(interface
))
2153 mac_control
|= CPSW_SL_CTL_EXT_EN
;
2154 /* Only clear those bits that can be set by am65_cpsw_nuss_mac_link_up() */
2155 cpsw_sl_ctl_clr(port
->slave
.mac_sl
, mac_control
);
2157 am65_cpsw_qos_link_down(ndev
);
2158 netif_tx_stop_all_queues(ndev
);
2161 static void am65_cpsw_nuss_mac_link_up(struct phylink_config
*config
, struct phy_device
*phy
,
2162 unsigned int mode
, phy_interface_t interface
, int speed
,
2163 int duplex
, bool tx_pause
, bool rx_pause
)
2165 struct am65_cpsw_slave_data
*slave
= container_of(config
, struct am65_cpsw_slave_data
,
2167 struct am65_cpsw_port
*port
= container_of(slave
, struct am65_cpsw_port
, slave
);
2168 struct am65_cpsw_common
*common
= port
->common
;
2169 u32 mac_control
= CPSW_SL_CTL_GMII_EN
;
2170 struct net_device
*ndev
= port
->ndev
;
2172 /* Bring the port out of idle state */
2173 cpsw_sl_ctl_clr(port
->slave
.mac_sl
, CPSW_SL_CTL_CMD_IDLE
);
2175 if (speed
== SPEED_1000
)
2176 mac_control
|= CPSW_SL_CTL_GIG
;
2177 /* TODO: Verify whether in-band is necessary for 10 Mbps RGMII */
2178 if (speed
== SPEED_10
&& phy_interface_mode_is_rgmii(interface
))
2179 /* Can be used with in band mode only */
2180 mac_control
|= CPSW_SL_CTL_EXT_EN
;
2181 if (speed
== SPEED_100
&& interface
== PHY_INTERFACE_MODE_RMII
)
2182 mac_control
|= CPSW_SL_CTL_IFCTL_A
;
2184 mac_control
|= CPSW_SL_CTL_FULLDUPLEX
;
2186 /* rx_pause/tx_pause */
2188 mac_control
|= CPSW_SL_CTL_TX_FLOW_EN
;
2191 mac_control
|= CPSW_SL_CTL_RX_FLOW_EN
;
2193 cpsw_sl_ctl_set(port
->slave
.mac_sl
, mac_control
);
2195 /* enable forwarding */
2196 cpsw_ale_control_set(common
->ale
, port
->port_id
, ALE_PORT_STATE
, ALE_PORT_STATE_FORWARD
);
2198 am65_cpsw_qos_link_up(ndev
, speed
);
2199 netif_tx_wake_all_queues(ndev
);
2202 static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops
= {
2203 .mac_config
= am65_cpsw_nuss_mac_config
,
2204 .mac_link_down
= am65_cpsw_nuss_mac_link_down
,
2205 .mac_link_up
= am65_cpsw_nuss_mac_link_up
,
2208 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port
*port
)
2210 struct am65_cpsw_common
*common
= port
->common
;
2212 if (!port
->disabled
)
2215 cpsw_ale_control_set(common
->ale
, port
->port_id
,
2216 ALE_PORT_STATE
, ALE_PORT_STATE_DISABLE
);
2218 cpsw_sl_reset(port
->slave
.mac_sl
, 100);
2219 cpsw_sl_ctl_reset(port
->slave
.mac_sl
);
2222 static void am65_cpsw_nuss_free_tx_chns(void *data
)
2224 struct am65_cpsw_common
*common
= data
;
2227 for (i
= 0; i
< common
->tx_ch_num
; i
++) {
2228 struct am65_cpsw_tx_chn
*tx_chn
= &common
->tx_chns
[i
];
2230 if (!IS_ERR_OR_NULL(tx_chn
->desc_pool
))
2231 k3_cppi_desc_pool_destroy(tx_chn
->desc_pool
);
2233 if (!IS_ERR_OR_NULL(tx_chn
->tx_chn
))
2234 k3_udma_glue_release_tx_chn(tx_chn
->tx_chn
);
2236 memset(tx_chn
, 0, sizeof(*tx_chn
));
2240 static void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common
*common
)
2242 struct device
*dev
= common
->dev
;
2245 devm_remove_action(dev
, am65_cpsw_nuss_free_tx_chns
, common
);
2247 common
->tx_ch_rate_msk
= 0;
2248 for (i
= 0; i
< common
->tx_ch_num
; i
++) {
2249 struct am65_cpsw_tx_chn
*tx_chn
= &common
->tx_chns
[i
];
2252 devm_free_irq(dev
, tx_chn
->irq
, tx_chn
);
2254 netif_napi_del(&tx_chn
->napi_tx
);
2257 am65_cpsw_nuss_free_tx_chns(common
);
2260 static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common
*common
)
2262 struct device
*dev
= common
->dev
;
2265 for (i
= 0; i
< common
->tx_ch_num
; i
++) {
2266 struct am65_cpsw_tx_chn
*tx_chn
= &common
->tx_chns
[i
];
2268 netif_napi_add_tx(common
->dma_ndev
, &tx_chn
->napi_tx
,
2269 am65_cpsw_nuss_tx_poll
);
2270 hrtimer_init(&tx_chn
->tx_hrtimer
, CLOCK_MONOTONIC
, HRTIMER_MODE_REL_PINNED
);
2271 tx_chn
->tx_hrtimer
.function
= &am65_cpsw_nuss_tx_timer_callback
;
2273 ret
= devm_request_irq(dev
, tx_chn
->irq
,
2274 am65_cpsw_nuss_tx_irq
,
2276 tx_chn
->tx_chn_name
, tx_chn
);
2278 dev_err(dev
, "failure requesting tx%u irq %u, %d\n",
2279 tx_chn
->id
, tx_chn
->irq
, ret
);
2288 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common
*common
)
2290 u32 max_desc_num
= ALIGN(AM65_CPSW_MAX_TX_DESC
, MAX_SKB_FRAGS
);
2291 struct k3_udma_glue_tx_channel_cfg tx_cfg
= { 0 };
2292 struct device
*dev
= common
->dev
;
2293 struct k3_ring_cfg ring_cfg
= {
2294 .elm_size
= K3_RINGACC_RING_ELSIZE_8
,
2295 .mode
= K3_RINGACC_RING_MODE_RING
,
2298 u32 hdesc_size
, hdesc_size_out
;
2301 hdesc_size
= cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE
,
2302 AM65_CPSW_NAV_SW_DATA_SIZE
);
2304 tx_cfg
.swdata_size
= AM65_CPSW_NAV_SW_DATA_SIZE
;
2305 tx_cfg
.tx_cfg
= ring_cfg
;
2306 tx_cfg
.txcq_cfg
= ring_cfg
;
2307 tx_cfg
.tx_cfg
.size
= max_desc_num
;
2308 tx_cfg
.txcq_cfg
.size
= max_desc_num
;
2310 for (i
= 0; i
< common
->tx_ch_num
; i
++) {
2311 struct am65_cpsw_tx_chn
*tx_chn
= &common
->tx_chns
[i
];
2313 snprintf(tx_chn
->tx_chn_name
,
2314 sizeof(tx_chn
->tx_chn_name
), "tx%d", i
);
2316 spin_lock_init(&tx_chn
->lock
);
2317 tx_chn
->common
= common
;
2319 tx_chn
->descs_num
= max_desc_num
;
2322 k3_udma_glue_request_tx_chn(dev
,
2323 tx_chn
->tx_chn_name
,
2325 if (IS_ERR(tx_chn
->tx_chn
)) {
2326 ret
= dev_err_probe(dev
, PTR_ERR(tx_chn
->tx_chn
),
2327 "Failed to request tx dma channel\n");
2330 tx_chn
->dma_dev
= k3_udma_glue_tx_get_dma_device(tx_chn
->tx_chn
);
2332 tx_chn
->desc_pool
= k3_cppi_desc_pool_create_name(tx_chn
->dma_dev
,
2335 tx_chn
->tx_chn_name
);
2336 if (IS_ERR(tx_chn
->desc_pool
)) {
2337 ret
= PTR_ERR(tx_chn
->desc_pool
);
2338 dev_err(dev
, "Failed to create poll %d\n", ret
);
2342 hdesc_size_out
= k3_cppi_desc_pool_desc_size(tx_chn
->desc_pool
);
2343 tx_chn
->dsize_log2
= __fls(hdesc_size_out
);
2344 WARN_ON(hdesc_size_out
!= (1 << tx_chn
->dsize_log2
));
2346 tx_chn
->irq
= k3_udma_glue_tx_get_irq(tx_chn
->tx_chn
);
2347 if (tx_chn
->irq
< 0) {
2348 dev_err(dev
, "Failed to get tx dma irq %d\n",
2354 snprintf(tx_chn
->tx_chn_name
,
2355 sizeof(tx_chn
->tx_chn_name
), "%s-tx%d",
2356 dev_name(dev
), tx_chn
->id
);
2359 ret
= am65_cpsw_nuss_ndev_add_tx_napi(common
);
2361 dev_err(dev
, "Failed to add tx NAPI %d\n", ret
);
2366 i
= devm_add_action(dev
, am65_cpsw_nuss_free_tx_chns
, common
);
2368 dev_err(dev
, "Failed to add free_tx_chns action %d\n", i
);
2375 static void am65_cpsw_nuss_free_rx_chns(void *data
)
2377 struct am65_cpsw_common
*common
= data
;
2378 struct am65_cpsw_rx_chn
*rx_chn
;
2380 rx_chn
= &common
->rx_chns
;
2382 if (!IS_ERR_OR_NULL(rx_chn
->desc_pool
))
2383 k3_cppi_desc_pool_destroy(rx_chn
->desc_pool
);
2385 if (!IS_ERR_OR_NULL(rx_chn
->rx_chn
))
2386 k3_udma_glue_release_rx_chn(rx_chn
->rx_chn
);
2389 static void am65_cpsw_nuss_remove_rx_chns(struct am65_cpsw_common
*common
)
2391 struct device
*dev
= common
->dev
;
2392 struct am65_cpsw_rx_chn
*rx_chn
;
2393 struct am65_cpsw_rx_flow
*flows
;
2396 rx_chn
= &common
->rx_chns
;
2397 flows
= rx_chn
->flows
;
2398 devm_remove_action(dev
, am65_cpsw_nuss_free_rx_chns
, common
);
2400 for (i
= 0; i
< common
->rx_ch_num_flows
; i
++) {
2401 if (!(flows
[i
].irq
< 0))
2402 devm_free_irq(dev
, flows
[i
].irq
, &flows
[i
]);
2403 netif_napi_del(&flows
[i
].napi_rx
);
2406 am65_cpsw_nuss_free_rx_chns(common
);
2408 common
->rx_flow_id_base
= -1;
2411 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common
*common
)
2413 struct am65_cpsw_rx_chn
*rx_chn
= &common
->rx_chns
;
2414 struct k3_udma_glue_rx_channel_cfg rx_cfg
= { 0 };
2415 u32 max_desc_num
= AM65_CPSW_MAX_RX_DESC
;
2416 struct device
*dev
= common
->dev
;
2417 struct am65_cpsw_rx_flow
*flow
;
2418 u32 hdesc_size
, hdesc_size_out
;
2422 hdesc_size
= cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE
,
2423 AM65_CPSW_NAV_SW_DATA_SIZE
);
2425 rx_cfg
.swdata_size
= AM65_CPSW_NAV_SW_DATA_SIZE
;
2426 rx_cfg
.flow_id_num
= common
->rx_ch_num_flows
;
2427 rx_cfg
.flow_id_base
= common
->rx_flow_id_base
;
2429 /* init all flows */
2431 rx_chn
->descs_num
= max_desc_num
* rx_cfg
.flow_id_num
;
2433 for (i
= 0; i
< common
->rx_ch_num_flows
; i
++) {
2434 flow
= &rx_chn
->flows
[i
];
2435 flow
->page_pool
= NULL
;
2438 rx_chn
->rx_chn
= k3_udma_glue_request_rx_chn(dev
, "rx", &rx_cfg
);
2439 if (IS_ERR(rx_chn
->rx_chn
)) {
2440 ret
= dev_err_probe(dev
, PTR_ERR(rx_chn
->rx_chn
),
2441 "Failed to request rx dma channel\n");
2444 rx_chn
->dma_dev
= k3_udma_glue_rx_get_dma_device(rx_chn
->rx_chn
);
2446 rx_chn
->desc_pool
= k3_cppi_desc_pool_create_name(rx_chn
->dma_dev
,
2449 if (IS_ERR(rx_chn
->desc_pool
)) {
2450 ret
= PTR_ERR(rx_chn
->desc_pool
);
2451 dev_err(dev
, "Failed to create rx poll %d\n", ret
);
2455 hdesc_size_out
= k3_cppi_desc_pool_desc_size(rx_chn
->desc_pool
);
2456 rx_chn
->dsize_log2
= __fls(hdesc_size_out
);
2457 WARN_ON(hdesc_size_out
!= (1 << rx_chn
->dsize_log2
));
2459 common
->rx_flow_id_base
=
2460 k3_udma_glue_rx_get_flow_id_base(rx_chn
->rx_chn
);
2461 dev_info(dev
, "set new flow-id-base %u\n", common
->rx_flow_id_base
);
2463 fdqring_id
= K3_RINGACC_RING_ID_ANY
;
2464 for (i
= 0; i
< rx_cfg
.flow_id_num
; i
++) {
2465 struct k3_ring_cfg rxring_cfg
= {
2466 .elm_size
= K3_RINGACC_RING_ELSIZE_8
,
2467 .mode
= K3_RINGACC_RING_MODE_RING
,
2470 struct k3_ring_cfg fdqring_cfg
= {
2471 .elm_size
= K3_RINGACC_RING_ELSIZE_8
,
2472 .flags
= K3_RINGACC_RING_SHARED
,
2474 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg
= {
2475 .rx_cfg
= rxring_cfg
,
2476 .rxfdq_cfg
= fdqring_cfg
,
2477 .ring_rxq_id
= K3_RINGACC_RING_ID_ANY
,
2479 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG
,
2482 flow
= &rx_chn
->flows
[i
];
2484 flow
->common
= common
;
2485 flow
->irq
= -EINVAL
;
2487 rx_flow_cfg
.ring_rxfdq0_id
= fdqring_id
;
2488 rx_flow_cfg
.rx_cfg
.size
= max_desc_num
;
2489 /* share same FDQ for all flows */
2490 rx_flow_cfg
.rxfdq_cfg
.size
= max_desc_num
* rx_cfg
.flow_id_num
;
2491 rx_flow_cfg
.rxfdq_cfg
.mode
= common
->pdata
.fdqring_mode
;
2493 ret
= k3_udma_glue_rx_flow_init(rx_chn
->rx_chn
,
2496 dev_err(dev
, "Failed to init rx flow%d %d\n", i
, ret
);
2501 k3_udma_glue_rx_flow_get_fdq_id(rx_chn
->rx_chn
,
2504 flow
->irq
= k3_udma_glue_rx_get_irq(rx_chn
->rx_chn
, i
);
2505 if (flow
->irq
<= 0) {
2506 dev_err(dev
, "Failed to get rx dma irq %d\n",
2512 snprintf(flow
->name
,
2513 sizeof(flow
->name
), "%s-rx%d",
2515 netif_napi_add(common
->dma_ndev
, &flow
->napi_rx
,
2516 am65_cpsw_nuss_rx_poll
);
2517 hrtimer_init(&flow
->rx_hrtimer
, CLOCK_MONOTONIC
,
2518 HRTIMER_MODE_REL_PINNED
);
2519 flow
->rx_hrtimer
.function
= &am65_cpsw_nuss_rx_timer_callback
;
2521 ret
= devm_request_irq(dev
, flow
->irq
,
2522 am65_cpsw_nuss_rx_irq
,
2526 dev_err(dev
, "failure requesting rx %d irq %u, %d\n",
2528 flow
->irq
= -EINVAL
;
2533 /* setup classifier to route priorities to flows */
2534 cpsw_ale_classifier_setup_default(common
->ale
, common
->rx_ch_num_flows
);
2537 i
= devm_add_action(dev
, am65_cpsw_nuss_free_rx_chns
, common
);
2539 dev_err(dev
, "Failed to add free_rx_chns action %d\n", i
);
2546 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common
*common
)
2548 struct am65_cpsw_host
*host_p
= am65_common_get_host(common
);
2550 host_p
->common
= common
;
2551 host_p
->port_base
= common
->cpsw_base
+ AM65_CPSW_NU_PORTS_BASE
;
2552 host_p
->stat_base
= common
->cpsw_base
+ AM65_CPSW_NU_STATS_BASE
;
2557 static int am65_cpsw_am654_get_efuse_macid(struct device_node
*of_node
,
2558 int slave
, u8
*mac_addr
)
2560 u32 mac_lo
, mac_hi
, offset
;
2561 struct regmap
*syscon
;
2564 syscon
= syscon_regmap_lookup_by_phandle(of_node
, "ti,syscon-efuse");
2565 if (IS_ERR(syscon
)) {
2566 if (PTR_ERR(syscon
) == -ENODEV
)
2568 return PTR_ERR(syscon
);
2571 ret
= of_property_read_u32_index(of_node
, "ti,syscon-efuse", 1,
2576 regmap_read(syscon
, offset
, &mac_lo
);
2577 regmap_read(syscon
, offset
+ 4, &mac_hi
);
2579 mac_addr
[0] = (mac_hi
>> 8) & 0xff;
2580 mac_addr
[1] = mac_hi
& 0xff;
2581 mac_addr
[2] = (mac_lo
>> 24) & 0xff;
2582 mac_addr
[3] = (mac_lo
>> 16) & 0xff;
2583 mac_addr
[4] = (mac_lo
>> 8) & 0xff;
2584 mac_addr
[5] = mac_lo
& 0xff;
2589 static int am65_cpsw_init_cpts(struct am65_cpsw_common
*common
)
2591 struct device
*dev
= common
->dev
;
2592 struct device_node
*node
;
2593 struct am65_cpts
*cpts
;
2594 void __iomem
*reg_base
;
2596 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS
))
2599 node
= of_get_child_by_name(dev
->of_node
, "cpts");
2601 dev_err(dev
, "%s cpts not found\n", __func__
);
2605 reg_base
= common
->cpsw_base
+ AM65_CPSW_NU_CPTS_BASE
;
2606 cpts
= am65_cpts_create(dev
, reg_base
, node
);
2608 int ret
= PTR_ERR(cpts
);
2611 dev_err(dev
, "cpts create err %d\n", ret
);
2614 common
->cpts
= cpts
;
2615 /* Forbid PM runtime if CPTS is running.
2616 * K3 CPSWxG modules may completely lose context during ON->OFF
2617 * transitions depending on integration.
2618 * AM65x/J721E MCU CPSW2G: false
2619 * J721E MAIN_CPSW9G: true
2621 pm_runtime_forbid(dev
);
2626 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common
*common
)
2628 struct device_node
*node
, *port_np
;
2629 struct device
*dev
= common
->dev
;
2632 node
= of_get_child_by_name(dev
->of_node
, "ethernet-ports");
2636 for_each_child_of_node(node
, port_np
) {
2637 struct am65_cpsw_port
*port
;
2640 /* it is not a slave port node, continue */
2641 if (strcmp(port_np
->name
, "port"))
2644 ret
= of_property_read_u32(port_np
, "reg", &port_id
);
2646 dev_err(dev
, "%pOF error reading port_id %d\n",
2651 if (!port_id
|| port_id
> common
->port_num
) {
2652 dev_err(dev
, "%pOF has invalid port_id %u %s\n",
2653 port_np
, port_id
, port_np
->name
);
2658 port
= am65_common_get_port(common
, port_id
);
2659 port
->port_id
= port_id
;
2660 port
->common
= common
;
2661 port
->port_base
= common
->cpsw_base
+ AM65_CPSW_NU_PORTS_BASE
+
2662 AM65_CPSW_NU_PORTS_OFFSET
* (port_id
);
2663 if (common
->pdata
.extra_modes
)
2664 port
->sgmii_base
= common
->ss_base
+ AM65_CPSW_SGMII_BASE
* (port_id
);
2665 port
->stat_base
= common
->cpsw_base
+ AM65_CPSW_NU_STATS_BASE
+
2666 (AM65_CPSW_NU_STATS_PORT_OFFSET
* port_id
);
2667 port
->name
= of_get_property(port_np
, "label", NULL
);
2668 port
->fetch_ram_base
=
2669 common
->cpsw_base
+ AM65_CPSW_NU_FRAM_BASE
+
2670 (AM65_CPSW_NU_FRAM_PORT_OFFSET
* (port_id
- 1));
2672 port
->slave
.mac_sl
= cpsw_sl_get("am65", dev
, port
->port_base
);
2673 if (IS_ERR(port
->slave
.mac_sl
)) {
2674 ret
= PTR_ERR(port
->slave
.mac_sl
);
2678 port
->disabled
= !of_device_is_available(port_np
);
2679 if (port
->disabled
) {
2680 common
->disabled_ports_mask
|= BIT(port
->port_id
);
2684 port
->slave
.ifphy
= devm_of_phy_get(dev
, port_np
, NULL
);
2685 if (IS_ERR(port
->slave
.ifphy
)) {
2686 ret
= PTR_ERR(port
->slave
.ifphy
);
2687 dev_err(dev
, "%pOF error retrieving port phy: %d\n",
2692 /* Initialize the Serdes PHY for the port */
2693 ret
= am65_cpsw_init_serdes_phy(dev
, port_np
, port
);
2697 port
->slave
.mac_only
=
2698 of_property_read_bool(port_np
, "ti,mac-only");
2700 /* get phy/link info */
2701 port
->slave
.port_np
= port_np
;
2702 ret
= of_get_phy_mode(port_np
, &port
->slave
.phy_if
);
2704 dev_err(dev
, "%pOF read phy-mode err %d\n",
2709 ret
= phy_set_mode_ext(port
->slave
.ifphy
, PHY_MODE_ETHERNET
, port
->slave
.phy_if
);
2713 ret
= of_get_mac_address(port_np
, port
->slave
.mac_addr
);
2715 am65_cpsw_am654_get_efuse_macid(port_np
,
2717 port
->slave
.mac_addr
);
2718 if (!is_valid_ether_addr(port
->slave
.mac_addr
)) {
2719 eth_random_addr(port
->slave
.mac_addr
);
2720 dev_err(dev
, "Use random MAC address\n");
2724 /* Reset all Queue priorities to 0 */
2725 writel(0, port
->port_base
+ AM65_CPSW_PN_REG_TX_PRI_MAP
);
2729 /* is there at least one ext.port */
2730 if (!(~common
->disabled_ports_mask
& GENMASK(common
->port_num
, 1))) {
2731 dev_err(dev
, "No Ext. port are available\n");
2738 of_node_put(port_np
);
2743 static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common
*common
)
2745 struct am65_cpsw_port
*port
;
2748 for (i
= 0; i
< common
->port_num
; i
++) {
2749 port
= &common
->ports
[i
];
2750 if (port
->slave
.phylink
)
2751 phylink_destroy(port
->slave
.phylink
);
2756 am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common
*common
, u32 port_idx
)
2758 struct am65_cpsw_ndev_priv
*ndev_priv
;
2759 struct device
*dev
= common
->dev
;
2760 struct am65_cpsw_port
*port
;
2761 struct phylink
*phylink
;
2763 port
= &common
->ports
[port_idx
];
2769 port
->ndev
= alloc_etherdev_mqs(sizeof(struct am65_cpsw_ndev_priv
),
2770 AM65_CPSW_MAX_QUEUES
,
2771 AM65_CPSW_MAX_QUEUES
);
2773 dev_err(dev
, "error allocating slave net_device %u\n",
2778 ndev_priv
= netdev_priv(port
->ndev
);
2779 ndev_priv
->port
= port
;
2780 ndev_priv
->msg_enable
= AM65_CPSW_DEBUG
;
2781 mutex_init(&ndev_priv
->mm_lock
);
2782 port
->qos
.link_speed
= SPEED_UNKNOWN
;
2783 SET_NETDEV_DEV(port
->ndev
, dev
);
2784 port
->ndev
->dev
.of_node
= port
->slave
.port_np
;
2786 eth_hw_addr_set(port
->ndev
, port
->slave
.mac_addr
);
2788 port
->ndev
->min_mtu
= AM65_CPSW_MIN_PACKET_SIZE
;
2789 port
->ndev
->max_mtu
= AM65_CPSW_MAX_PACKET_SIZE
-
2790 (VLAN_ETH_HLEN
+ ETH_FCS_LEN
);
2791 port
->ndev
->hw_features
= NETIF_F_SG
|
2795 port
->ndev
->features
= port
->ndev
->hw_features
|
2796 NETIF_F_HW_VLAN_CTAG_FILTER
;
2797 port
->ndev
->xdp_features
= NETDEV_XDP_ACT_BASIC
|
2798 NETDEV_XDP_ACT_REDIRECT
|
2799 NETDEV_XDP_ACT_NDO_XMIT
;
2800 port
->ndev
->vlan_features
|= NETIF_F_SG
;
2801 port
->ndev
->netdev_ops
= &am65_cpsw_nuss_netdev_ops
;
2802 port
->ndev
->ethtool_ops
= &am65_cpsw_ethtool_ops_slave
;
2804 /* Configuring Phylink */
2805 port
->slave
.phylink_config
.dev
= &port
->ndev
->dev
;
2806 port
->slave
.phylink_config
.type
= PHYLINK_NETDEV
;
2807 port
->slave
.phylink_config
.mac_capabilities
= MAC_SYM_PAUSE
| MAC_10
| MAC_100
|
2808 MAC_1000FD
| MAC_5000FD
;
2809 port
->slave
.phylink_config
.mac_managed_pm
= true; /* MAC does PM */
2811 switch (port
->slave
.phy_if
) {
2812 case PHY_INTERFACE_MODE_RGMII
:
2813 case PHY_INTERFACE_MODE_RGMII_ID
:
2814 case PHY_INTERFACE_MODE_RGMII_RXID
:
2815 case PHY_INTERFACE_MODE_RGMII_TXID
:
2816 phy_interface_set_rgmii(port
->slave
.phylink_config
.supported_interfaces
);
2819 case PHY_INTERFACE_MODE_RMII
:
2820 __set_bit(PHY_INTERFACE_MODE_RMII
,
2821 port
->slave
.phylink_config
.supported_interfaces
);
2824 case PHY_INTERFACE_MODE_QSGMII
:
2825 case PHY_INTERFACE_MODE_SGMII
:
2826 case PHY_INTERFACE_MODE_USXGMII
:
2827 if (common
->pdata
.extra_modes
& BIT(port
->slave
.phy_if
)) {
2828 __set_bit(port
->slave
.phy_if
,
2829 port
->slave
.phylink_config
.supported_interfaces
);
2831 dev_err(dev
, "selected phy-mode is not supported\n");
2837 dev_err(dev
, "selected phy-mode is not supported\n");
2841 phylink
= phylink_create(&port
->slave
.phylink_config
,
2842 of_fwnode_handle(port
->slave
.port_np
),
2844 &am65_cpsw_phylink_mac_ops
);
2845 if (IS_ERR(phylink
))
2846 return PTR_ERR(phylink
);
2848 port
->slave
.phylink
= phylink
;
2850 /* Disable TX checksum offload by default due to HW bug */
2851 if (common
->pdata
.quirks
& AM65_CPSW_QUIRK_I2027_NO_TX_CSUM
)
2852 port
->ndev
->features
&= ~NETIF_F_HW_CSUM
;
2854 port
->ndev
->pcpu_stat_type
= NETDEV_PCPU_STAT_TSTATS
;
2855 port
->xdp_prog
= NULL
;
2857 if (!common
->dma_ndev
)
2858 common
->dma_ndev
= port
->ndev
;
2863 static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common
*common
)
2868 for (i
= 0; i
< common
->port_num
; i
++) {
2869 ret
= am65_cpsw_nuss_init_port_ndev(common
, i
);
2877 static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common
*common
)
2879 struct am65_cpsw_port
*port
;
2882 for (i
= 0; i
< common
->port_num
; i
++) {
2883 port
= &common
->ports
[i
];
2886 if (port
->ndev
->reg_state
== NETREG_REGISTERED
)
2887 unregister_netdev(port
->ndev
);
2888 free_netdev(port
->ndev
);
2893 static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common
*common
)
2898 if (common
->br_members
== (GENMASK(common
->port_num
, 1) & ~common
->disabled_ports_mask
))
2901 dev_dbg(common
->dev
, "set offload_fwd_mark %d\n", set_val
);
2903 for (i
= 1; i
<= common
->port_num
; i
++) {
2904 struct am65_cpsw_port
*port
= am65_common_get_port(common
, i
);
2905 struct am65_cpsw_ndev_priv
*priv
;
2910 priv
= am65_ndev_to_priv(port
->ndev
);
2911 priv
->offload_fwd_mark
= set_val
;
2915 bool am65_cpsw_port_dev_check(const struct net_device
*ndev
)
2917 if (ndev
->netdev_ops
== &am65_cpsw_nuss_netdev_ops
) {
2918 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
2920 return !common
->is_emac_mode
;
2926 static int am65_cpsw_netdevice_port_link(struct net_device
*ndev
,
2927 struct net_device
*br_ndev
,
2928 struct netlink_ext_ack
*extack
)
2930 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
2931 struct am65_cpsw_ndev_priv
*priv
= am65_ndev_to_priv(ndev
);
2934 if (!common
->br_members
) {
2935 common
->hw_bridge_dev
= br_ndev
;
2937 /* This is adding the port to a second bridge, this is
2940 if (common
->hw_bridge_dev
!= br_ndev
)
2944 err
= switchdev_bridge_port_offload(ndev
, ndev
, NULL
, NULL
, NULL
,
2949 common
->br_members
|= BIT(priv
->port
->port_id
);
2951 am65_cpsw_port_offload_fwd_mark_update(common
);
2956 static void am65_cpsw_netdevice_port_unlink(struct net_device
*ndev
)
2958 struct am65_cpsw_common
*common
= am65_ndev_to_common(ndev
);
2959 struct am65_cpsw_ndev_priv
*priv
= am65_ndev_to_priv(ndev
);
2961 switchdev_bridge_port_unoffload(ndev
, NULL
, NULL
, NULL
);
2963 common
->br_members
&= ~BIT(priv
->port
->port_id
);
2965 am65_cpsw_port_offload_fwd_mark_update(common
);
2967 if (!common
->br_members
)
2968 common
->hw_bridge_dev
= NULL
;
2971 /* netdev notifier */
2972 static int am65_cpsw_netdevice_event(struct notifier_block
*unused
,
2973 unsigned long event
, void *ptr
)
2975 struct netlink_ext_ack
*extack
= netdev_notifier_info_to_extack(ptr
);
2976 struct net_device
*ndev
= netdev_notifier_info_to_dev(ptr
);
2977 struct netdev_notifier_changeupper_info
*info
;
2978 int ret
= NOTIFY_DONE
;
2980 if (!am65_cpsw_port_dev_check(ndev
))
2984 case NETDEV_CHANGEUPPER
:
2987 if (netif_is_bridge_master(info
->upper_dev
)) {
2989 ret
= am65_cpsw_netdevice_port_link(ndev
,
2993 am65_cpsw_netdevice_port_unlink(ndev
);
3000 return notifier_from_errno(ret
);
3003 static int am65_cpsw_register_notifiers(struct am65_cpsw_common
*cpsw
)
3007 if (AM65_CPSW_IS_CPSW2G(cpsw
) ||
3008 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV
))
3011 cpsw
->am65_cpsw_netdevice_nb
.notifier_call
= &am65_cpsw_netdevice_event
;
3012 ret
= register_netdevice_notifier(&cpsw
->am65_cpsw_netdevice_nb
);
3014 dev_err(cpsw
->dev
, "can't register netdevice notifier\n");
3018 ret
= am65_cpsw_switchdev_register_notifiers(cpsw
);
3020 unregister_netdevice_notifier(&cpsw
->am65_cpsw_netdevice_nb
);
3025 static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common
*cpsw
)
3027 if (AM65_CPSW_IS_CPSW2G(cpsw
) ||
3028 !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV
))
3031 am65_cpsw_switchdev_unregister_notifiers(cpsw
);
3032 unregister_netdevice_notifier(&cpsw
->am65_cpsw_netdevice_nb
);
3035 static const struct devlink_ops am65_cpsw_devlink_ops
= {};
3037 static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common
*cpsw
)
3039 cpsw_ale_add_mcast(cpsw
->ale
, eth_stp_addr
, ALE_PORT_HOST
, ALE_SUPER
, 0,
3040 ALE_MCAST_BLOCK_LEARN_FWD
);
3043 static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common
*common
)
3045 struct am65_cpsw_host
*host
= am65_common_get_host(common
);
3047 writel(common
->default_vlan
, host
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3049 am65_cpsw_init_stp_ale_entry(common
);
3051 cpsw_ale_control_set(common
->ale
, HOST_PORT_NUM
, ALE_P0_UNI_FLOOD
, 1);
3052 dev_dbg(common
->dev
, "Set P0_UNI_FLOOD\n");
3053 cpsw_ale_control_set(common
->ale
, HOST_PORT_NUM
, ALE_PORT_NOLEARN
, 0);
3056 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common
*common
)
3058 struct am65_cpsw_host
*host
= am65_common_get_host(common
);
3060 writel(0, host
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3062 cpsw_ale_control_set(common
->ale
, HOST_PORT_NUM
, ALE_P0_UNI_FLOOD
, 0);
3063 dev_dbg(common
->dev
, "unset P0_UNI_FLOOD\n");
3065 /* learning make no sense in multi-mac mode */
3066 cpsw_ale_control_set(common
->ale
, HOST_PORT_NUM
, ALE_PORT_NOLEARN
, 1);
3069 static int am65_cpsw_dl_switch_mode_get(struct devlink
*dl
, u32 id
,
3070 struct devlink_param_gset_ctx
*ctx
)
3072 struct am65_cpsw_devlink
*dl_priv
= devlink_priv(dl
);
3073 struct am65_cpsw_common
*common
= dl_priv
->common
;
3075 dev_dbg(common
->dev
, "%s id:%u\n", __func__
, id
);
3077 if (id
!= AM65_CPSW_DL_PARAM_SWITCH_MODE
)
3080 ctx
->val
.vbool
= !common
->is_emac_mode
;
3085 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port
*port
)
3087 struct am65_cpsw_slave_data
*slave
= &port
->slave
;
3088 struct am65_cpsw_common
*common
= port
->common
;
3091 writel(slave
->port_vlan
, port
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3093 if (slave
->mac_only
)
3094 /* enable mac-only mode on port */
3095 cpsw_ale_control_set(common
->ale
, port
->port_id
,
3096 ALE_PORT_MACONLY
, 1);
3098 cpsw_ale_control_set(common
->ale
, port
->port_id
, ALE_PORT_NOLEARN
, 1);
3100 port_mask
= BIT(port
->port_id
) | ALE_PORT_HOST
;
3102 cpsw_ale_add_ucast(common
->ale
, port
->ndev
->dev_addr
,
3103 HOST_PORT_NUM
, ALE_SECURE
, slave
->port_vlan
);
3104 cpsw_ale_add_mcast(common
->ale
, port
->ndev
->broadcast
,
3105 port_mask
, ALE_VLAN
, slave
->port_vlan
, ALE_MCAST_FWD_2
);
3108 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port
*port
)
3110 struct am65_cpsw_slave_data
*slave
= &port
->slave
;
3111 struct am65_cpsw_common
*cpsw
= port
->common
;
3114 cpsw_ale_control_set(cpsw
->ale
, port
->port_id
,
3115 ALE_PORT_NOLEARN
, 0);
3117 cpsw_ale_add_ucast(cpsw
->ale
, port
->ndev
->dev_addr
,
3118 HOST_PORT_NUM
, ALE_SECURE
| ALE_BLOCKED
| ALE_VLAN
,
3121 port_mask
= BIT(port
->port_id
) | ALE_PORT_HOST
;
3123 cpsw_ale_add_mcast(cpsw
->ale
, port
->ndev
->broadcast
,
3124 port_mask
, ALE_VLAN
, slave
->port_vlan
,
3127 writel(slave
->port_vlan
, port
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3129 cpsw_ale_control_set(cpsw
->ale
, port
->port_id
,
3130 ALE_PORT_MACONLY
, 0);
3133 static int am65_cpsw_dl_switch_mode_set(struct devlink
*dl
, u32 id
,
3134 struct devlink_param_gset_ctx
*ctx
,
3135 struct netlink_ext_ack
*extack
)
3137 struct am65_cpsw_devlink
*dl_priv
= devlink_priv(dl
);
3138 struct am65_cpsw_common
*cpsw
= dl_priv
->common
;
3139 bool switch_en
= ctx
->val
.vbool
;
3140 bool if_running
= false;
3143 dev_dbg(cpsw
->dev
, "%s id:%u\n", __func__
, id
);
3145 if (id
!= AM65_CPSW_DL_PARAM_SWITCH_MODE
)
3148 if (switch_en
== !cpsw
->is_emac_mode
)
3151 if (!switch_en
&& cpsw
->br_members
) {
3152 dev_err(cpsw
->dev
, "Remove ports from bridge before disabling switch mode\n");
3158 cpsw
->is_emac_mode
= !switch_en
;
3160 for (i
= 0; i
< cpsw
->port_num
; i
++) {
3161 struct net_device
*sl_ndev
= cpsw
->ports
[i
].ndev
;
3163 if (!sl_ndev
|| !netif_running(sl_ndev
))
3170 /* all ndevs are down */
3171 for (i
= 0; i
< cpsw
->port_num
; i
++) {
3172 struct net_device
*sl_ndev
= cpsw
->ports
[i
].ndev
;
3173 struct am65_cpsw_slave_data
*slave
;
3178 slave
= am65_ndev_to_slave(sl_ndev
);
3180 slave
->port_vlan
= cpsw
->default_vlan
;
3182 slave
->port_vlan
= 0;
3188 cpsw_ale_control_set(cpsw
->ale
, 0, ALE_BYPASS
, 1);
3189 /* clean up ALE table */
3190 cpsw_ale_control_set(cpsw
->ale
, HOST_PORT_NUM
, ALE_CLEAR
, 1);
3191 cpsw_ale_control_get(cpsw
->ale
, HOST_PORT_NUM
, ALE_AGEOUT
);
3194 dev_info(cpsw
->dev
, "Enable switch mode\n");
3196 am65_cpsw_init_host_port_switch(cpsw
);
3198 for (i
= 0; i
< cpsw
->port_num
; i
++) {
3199 struct net_device
*sl_ndev
= cpsw
->ports
[i
].ndev
;
3200 struct am65_cpsw_slave_data
*slave
;
3201 struct am65_cpsw_port
*port
;
3206 port
= am65_ndev_to_port(sl_ndev
);
3207 slave
= am65_ndev_to_slave(sl_ndev
);
3208 slave
->port_vlan
= cpsw
->default_vlan
;
3210 if (netif_running(sl_ndev
))
3211 am65_cpsw_init_port_switch_ale(port
);
3215 dev_info(cpsw
->dev
, "Disable switch mode\n");
3217 am65_cpsw_init_host_port_emac(cpsw
);
3219 for (i
= 0; i
< cpsw
->port_num
; i
++) {
3220 struct net_device
*sl_ndev
= cpsw
->ports
[i
].ndev
;
3221 struct am65_cpsw_port
*port
;
3226 port
= am65_ndev_to_port(sl_ndev
);
3227 port
->slave
.port_vlan
= 0;
3228 if (netif_running(sl_ndev
))
3229 am65_cpsw_init_port_emac_ale(port
);
3232 cpsw_ale_control_set(cpsw
->ale
, HOST_PORT_NUM
, ALE_BYPASS
, 0);
3239 static const struct devlink_param am65_cpsw_devlink_params
[] = {
3240 DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE
, "switch_mode",
3241 DEVLINK_PARAM_TYPE_BOOL
,
3242 BIT(DEVLINK_PARAM_CMODE_RUNTIME
),
3243 am65_cpsw_dl_switch_mode_get
,
3244 am65_cpsw_dl_switch_mode_set
, NULL
),
3247 static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common
*common
)
3249 struct devlink_port_attrs attrs
= {};
3250 struct am65_cpsw_devlink
*dl_priv
;
3251 struct device
*dev
= common
->dev
;
3252 struct devlink_port
*dl_port
;
3253 struct am65_cpsw_port
*port
;
3258 devlink_alloc(&am65_cpsw_devlink_ops
, sizeof(*dl_priv
), dev
);
3259 if (!common
->devlink
)
3262 dl_priv
= devlink_priv(common
->devlink
);
3263 dl_priv
->common
= common
;
3265 /* Provide devlink hook to switch mode when multiple external ports
3266 * are present NUSS switchdev driver is enabled.
3268 if (!AM65_CPSW_IS_CPSW2G(common
) &&
3269 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV
)) {
3270 ret
= devlink_params_register(common
->devlink
,
3271 am65_cpsw_devlink_params
,
3272 ARRAY_SIZE(am65_cpsw_devlink_params
));
3274 dev_err(dev
, "devlink params reg fail ret:%d\n", ret
);
3279 for (i
= 1; i
<= common
->port_num
; i
++) {
3280 port
= am65_common_get_port(common
, i
);
3281 dl_port
= &port
->devlink_port
;
3284 attrs
.flavour
= DEVLINK_PORT_FLAVOUR_PHYSICAL
;
3286 attrs
.flavour
= DEVLINK_PORT_FLAVOUR_UNUSED
;
3287 attrs
.phys
.port_number
= port
->port_id
;
3288 attrs
.switch_id
.id_len
= sizeof(resource_size_t
);
3289 memcpy(attrs
.switch_id
.id
, common
->switch_id
, attrs
.switch_id
.id_len
);
3290 devlink_port_attrs_set(dl_port
, &attrs
);
3292 ret
= devlink_port_register(common
->devlink
, dl_port
, port
->port_id
);
3294 dev_err(dev
, "devlink_port reg fail for port %d, ret:%d\n",
3295 port
->port_id
, ret
);
3299 devlink_register(common
->devlink
);
3303 for (i
= i
- 1; i
>= 1; i
--) {
3304 port
= am65_common_get_port(common
, i
);
3305 dl_port
= &port
->devlink_port
;
3307 devlink_port_unregister(dl_port
);
3310 devlink_free(common
->devlink
);
3314 static void am65_cpsw_unregister_devlink(struct am65_cpsw_common
*common
)
3316 struct devlink_port
*dl_port
;
3317 struct am65_cpsw_port
*port
;
3320 devlink_unregister(common
->devlink
);
3322 for (i
= 1; i
<= common
->port_num
; i
++) {
3323 port
= am65_common_get_port(common
, i
);
3324 dl_port
= &port
->devlink_port
;
3326 devlink_port_unregister(dl_port
);
3329 if (!AM65_CPSW_IS_CPSW2G(common
) &&
3330 IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV
))
3331 devlink_params_unregister(common
->devlink
,
3332 am65_cpsw_devlink_params
,
3333 ARRAY_SIZE(am65_cpsw_devlink_params
));
3335 devlink_free(common
->devlink
);
3338 static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common
*common
)
3340 struct am65_cpsw_rx_chn
*rx_chan
= &common
->rx_chns
;
3341 struct am65_cpsw_tx_chn
*tx_chan
= common
->tx_chns
;
3342 struct device
*dev
= common
->dev
;
3343 struct am65_cpsw_port
*port
;
3346 /* init tx channels */
3347 ret
= am65_cpsw_nuss_init_tx_chns(common
);
3350 ret
= am65_cpsw_nuss_init_rx_chns(common
);
3354 /* The DMA Channels are not guaranteed to be in a clean state.
3355 * Reset and disable them to ensure that they are back to the
3356 * clean state and ready to be used.
3358 for (i
= 0; i
< common
->tx_ch_num
; i
++) {
3359 k3_udma_glue_reset_tx_chn(tx_chan
[i
].tx_chn
, &tx_chan
[i
],
3360 am65_cpsw_nuss_tx_cleanup
);
3361 k3_udma_glue_disable_tx_chn(tx_chan
[i
].tx_chn
);
3364 for (i
= 0; i
< common
->rx_ch_num_flows
; i
++)
3365 k3_udma_glue_reset_rx_chn(rx_chan
->rx_chn
, i
,
3367 am65_cpsw_nuss_rx_cleanup
, !!i
);
3369 k3_udma_glue_disable_rx_chn(rx_chan
->rx_chn
);
3371 ret
= am65_cpsw_nuss_register_devlink(common
);
3375 for (i
= 0; i
< common
->port_num
; i
++) {
3376 port
= &common
->ports
[i
];
3381 SET_NETDEV_DEVLINK_PORT(port
->ndev
, &port
->devlink_port
);
3383 ret
= register_netdev(port
->ndev
);
3385 dev_err(dev
, "error registering slave net device%i %d\n",
3387 goto err_cleanup_ndev
;
3391 ret
= am65_cpsw_register_notifiers(common
);
3393 goto err_cleanup_ndev
;
3395 /* can't auto unregister ndev using devm_add_action() due to
3396 * devres release sequence in DD core for DMA
3402 am65_cpsw_nuss_cleanup_ndev(common
);
3403 am65_cpsw_unregister_devlink(common
);
3408 int am65_cpsw_nuss_update_tx_rx_chns(struct am65_cpsw_common
*common
,
3409 int num_tx
, int num_rx
)
3413 am65_cpsw_nuss_remove_tx_chns(common
);
3414 am65_cpsw_nuss_remove_rx_chns(common
);
3416 common
->tx_ch_num
= num_tx
;
3417 common
->rx_ch_num_flows
= num_rx
;
3418 ret
= am65_cpsw_nuss_init_tx_chns(common
);
3422 ret
= am65_cpsw_nuss_init_rx_chns(common
);
3427 struct am65_cpsw_soc_pdata
{
3431 static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0
= {
3432 .quirks_dis
= AM65_CPSW_QUIRK_I2027_NO_TX_CSUM
,
3435 static const struct soc_device_attribute am65_cpsw_socinfo
[] = {
3436 { .family
= "AM65X",
3437 .revision
= "SR2.0",
3438 .data
= &am65x_soc_sr2_0
3443 static const struct am65_cpsw_pdata am65x_sr1_0
= {
3444 .quirks
= AM65_CPSW_QUIRK_I2027_NO_TX_CSUM
,
3445 .ale_dev_id
= "am65x-cpsw2g",
3446 .fdqring_mode
= K3_RINGACC_RING_MODE_MESSAGE
,
3449 static const struct am65_cpsw_pdata j721e_pdata
= {
3451 .ale_dev_id
= "am65x-cpsw2g",
3452 .fdqring_mode
= K3_RINGACC_RING_MODE_MESSAGE
,
3455 static const struct am65_cpsw_pdata am64x_cpswxg_pdata
= {
3456 .quirks
= AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ
,
3457 .ale_dev_id
= "am64-cpswxg",
3458 .fdqring_mode
= K3_RINGACC_RING_MODE_RING
,
3461 static const struct am65_cpsw_pdata j7200_cpswxg_pdata
= {
3463 .ale_dev_id
= "am64-cpswxg",
3464 .fdqring_mode
= K3_RINGACC_RING_MODE_RING
,
3465 .extra_modes
= BIT(PHY_INTERFACE_MODE_QSGMII
) | BIT(PHY_INTERFACE_MODE_SGMII
) |
3466 BIT(PHY_INTERFACE_MODE_USXGMII
),
3469 static const struct am65_cpsw_pdata j721e_cpswxg_pdata
= {
3471 .ale_dev_id
= "am64-cpswxg",
3472 .fdqring_mode
= K3_RINGACC_RING_MODE_MESSAGE
,
3473 .extra_modes
= BIT(PHY_INTERFACE_MODE_QSGMII
) | BIT(PHY_INTERFACE_MODE_SGMII
),
3476 static const struct am65_cpsw_pdata j784s4_cpswxg_pdata
= {
3478 .ale_dev_id
= "am64-cpswxg",
3479 .fdqring_mode
= K3_RINGACC_RING_MODE_MESSAGE
,
3480 .extra_modes
= BIT(PHY_INTERFACE_MODE_QSGMII
) | BIT(PHY_INTERFACE_MODE_SGMII
) |
3481 BIT(PHY_INTERFACE_MODE_USXGMII
),
3484 static const struct of_device_id am65_cpsw_nuss_of_mtable
[] = {
3485 { .compatible
= "ti,am654-cpsw-nuss", .data
= &am65x_sr1_0
},
3486 { .compatible
= "ti,j721e-cpsw-nuss", .data
= &j721e_pdata
},
3487 { .compatible
= "ti,am642-cpsw-nuss", .data
= &am64x_cpswxg_pdata
},
3488 { .compatible
= "ti,j7200-cpswxg-nuss", .data
= &j7200_cpswxg_pdata
},
3489 { .compatible
= "ti,j721e-cpswxg-nuss", .data
= &j721e_cpswxg_pdata
},
3490 { .compatible
= "ti,j784s4-cpswxg-nuss", .data
= &j784s4_cpswxg_pdata
},
3493 MODULE_DEVICE_TABLE(of
, am65_cpsw_nuss_of_mtable
);
3495 static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common
*common
)
3497 const struct soc_device_attribute
*soc
;
3499 soc
= soc_device_match(am65_cpsw_socinfo
);
3500 if (soc
&& soc
->data
) {
3501 const struct am65_cpsw_soc_pdata
*socdata
= soc
->data
;
3503 /* disable quirks */
3504 common
->pdata
.quirks
&= ~socdata
->quirks_dis
;
3508 static int am65_cpsw_nuss_probe(struct platform_device
*pdev
)
3510 struct cpsw_ale_params ale_params
= { 0 };
3511 const struct of_device_id
*of_id
;
3512 struct device
*dev
= &pdev
->dev
;
3513 struct am65_cpsw_common
*common
;
3514 struct device_node
*node
;
3515 struct resource
*res
;
3521 common
= devm_kzalloc(dev
, sizeof(struct am65_cpsw_common
), GFP_KERNEL
);
3526 of_id
= of_match_device(am65_cpsw_nuss_of_mtable
, dev
);
3529 common
->pdata
= *(const struct am65_cpsw_pdata
*)of_id
->data
;
3531 am65_cpsw_nuss_apply_socinfo(common
);
3533 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "cpsw_nuss");
3534 common
->ss_base
= devm_ioremap_resource(&pdev
->dev
, res
);
3535 if (IS_ERR(common
->ss_base
))
3536 return PTR_ERR(common
->ss_base
);
3537 common
->cpsw_base
= common
->ss_base
+ AM65_CPSW_CPSW_NU_BASE
;
3538 /* Use device's physical base address as switch id */
3539 id_temp
= cpu_to_be64(res
->start
);
3540 memcpy(common
->switch_id
, &id_temp
, sizeof(res
->start
));
3542 node
= of_get_child_by_name(dev
->of_node
, "ethernet-ports");
3545 common
->port_num
= of_get_child_count(node
);
3547 if (common
->port_num
< 1 || common
->port_num
> AM65_CPSW_MAX_PORTS
)
3550 common
->rx_flow_id_base
= -1;
3551 init_completion(&common
->tdown_complete
);
3552 common
->tx_ch_num
= AM65_CPSW_DEFAULT_TX_CHNS
;
3553 common
->rx_ch_num_flows
= AM65_CPSW_DEFAULT_RX_CHN_FLOWS
;
3554 common
->pf_p0_rx_ptype_rrobin
= false;
3555 common
->default_vlan
= 1;
3557 common
->ports
= devm_kcalloc(dev
, common
->port_num
,
3558 sizeof(*common
->ports
),
3563 clk
= devm_clk_get(dev
, "fck");
3565 return dev_err_probe(dev
, PTR_ERR(clk
), "getting fck clock\n");
3566 common
->bus_freq
= clk_get_rate(clk
);
3568 pm_runtime_enable(dev
);
3569 ret
= pm_runtime_resume_and_get(dev
);
3571 pm_runtime_disable(dev
);
3575 node
= of_get_child_by_name(dev
->of_node
, "mdio");
3577 dev_warn(dev
, "MDIO node not found\n");
3578 } else if (of_device_is_available(node
)) {
3579 struct platform_device
*mdio_pdev
;
3581 mdio_pdev
= of_platform_device_create(node
, NULL
, dev
);
3587 common
->mdio_dev
= &mdio_pdev
->dev
;
3591 am65_cpsw_nuss_get_ver(common
);
3593 ret
= am65_cpsw_nuss_init_host_p(common
);
3597 ret
= am65_cpsw_nuss_init_slave_ports(common
);
3601 /* init common data */
3602 ale_params
.dev
= dev
;
3603 ale_params
.ale_ageout
= AM65_CPSW_ALE_AGEOUT_DEFAULT
;
3604 ale_params
.ale_ports
= common
->port_num
+ 1;
3605 ale_params
.ale_regs
= common
->cpsw_base
+ AM65_CPSW_NU_ALE_BASE
;
3606 ale_params
.dev_id
= common
->pdata
.ale_dev_id
;
3607 ale_params
.bus_freq
= common
->bus_freq
;
3609 common
->ale
= cpsw_ale_create(&ale_params
);
3610 if (IS_ERR(common
->ale
)) {
3611 dev_err(dev
, "error initializing ale engine\n");
3612 ret
= PTR_ERR(common
->ale
);
3616 ale_entries
= common
->ale
->params
.ale_entries
;
3617 common
->ale_context
= devm_kzalloc(dev
,
3618 ale_entries
* ALE_ENTRY_WORDS
* sizeof(u32
),
3620 ret
= am65_cpsw_init_cpts(common
);
3625 for (i
= 0; i
< common
->port_num
; i
++)
3626 am65_cpsw_nuss_slave_disable_unused(&common
->ports
[i
]);
3628 dev_set_drvdata(dev
, common
);
3630 common
->is_emac_mode
= true;
3632 ret
= am65_cpsw_nuss_init_ndevs(common
);
3634 goto err_ndevs_clear
;
3636 ret
= am65_cpsw_nuss_register_ndevs(common
);
3638 goto err_ndevs_clear
;
3640 pm_runtime_put(dev
);
3644 am65_cpsw_nuss_cleanup_ndev(common
);
3645 am65_cpsw_nuss_phylink_cleanup(common
);
3646 am65_cpts_release(common
->cpts
);
3648 if (common
->mdio_dev
)
3649 of_platform_device_destroy(common
->mdio_dev
, NULL
);
3651 pm_runtime_put_sync(dev
);
3652 pm_runtime_disable(dev
);
3656 static void am65_cpsw_nuss_remove(struct platform_device
*pdev
)
3658 struct device
*dev
= &pdev
->dev
;
3659 struct am65_cpsw_common
*common
;
3662 common
= dev_get_drvdata(dev
);
3664 ret
= pm_runtime_resume_and_get(&pdev
->dev
);
3666 /* Note, if this error path is taken, we're leaking some
3669 dev_err(&pdev
->dev
, "Failed to resume device (%pe)\n",
3674 am65_cpsw_unregister_notifiers(common
);
3676 /* must unregister ndevs here because DD release_driver routine calls
3677 * dma_deconfigure(dev) before devres_release_all(dev)
3679 am65_cpsw_nuss_cleanup_ndev(common
);
3680 am65_cpsw_unregister_devlink(common
);
3681 am65_cpsw_nuss_phylink_cleanup(common
);
3682 am65_cpts_release(common
->cpts
);
3683 am65_cpsw_disable_serdes_phy(common
);
3685 if (common
->mdio_dev
)
3686 of_platform_device_destroy(common
->mdio_dev
, NULL
);
3688 pm_runtime_put_sync(&pdev
->dev
);
3689 pm_runtime_disable(&pdev
->dev
);
3692 static int am65_cpsw_nuss_suspend(struct device
*dev
)
3694 struct am65_cpsw_common
*common
= dev_get_drvdata(dev
);
3695 struct am65_cpsw_host
*host_p
= am65_common_get_host(common
);
3696 struct am65_cpsw_port
*port
;
3697 struct net_device
*ndev
;
3700 cpsw_ale_dump(common
->ale
, common
->ale_context
);
3701 host_p
->vid_context
= readl(host_p
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3702 for (i
= 0; i
< common
->port_num
; i
++) {
3703 port
= &common
->ports
[i
];
3709 port
->vid_context
= readl(port
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3710 netif_device_detach(ndev
);
3711 if (netif_running(ndev
)) {
3713 ret
= am65_cpsw_nuss_ndo_slave_stop(ndev
);
3716 netdev_err(ndev
, "failed to stop: %d", ret
);
3722 am65_cpts_suspend(common
->cpts
);
3724 am65_cpsw_nuss_remove_rx_chns(common
);
3725 am65_cpsw_nuss_remove_tx_chns(common
);
3730 static int am65_cpsw_nuss_resume(struct device
*dev
)
3732 struct am65_cpsw_common
*common
= dev_get_drvdata(dev
);
3733 struct am65_cpsw_host
*host_p
= am65_common_get_host(common
);
3734 struct am65_cpsw_port
*port
;
3735 struct net_device
*ndev
;
3738 ret
= am65_cpsw_nuss_init_tx_chns(common
);
3741 ret
= am65_cpsw_nuss_init_rx_chns(common
);
3745 /* If RX IRQ was disabled before suspend, keep it disabled */
3746 for (i
= 0; i
< common
->rx_ch_num_flows
; i
++) {
3747 if (common
->rx_chns
.flows
[i
].irq_disabled
)
3748 disable_irq(common
->rx_chns
.flows
[i
].irq
);
3751 am65_cpts_resume(common
->cpts
);
3753 for (i
= 0; i
< common
->port_num
; i
++) {
3754 port
= &common
->ports
[i
];
3760 if (netif_running(ndev
)) {
3762 ret
= am65_cpsw_nuss_ndo_slave_open(ndev
);
3765 netdev_err(ndev
, "failed to start: %d", ret
);
3770 netif_device_attach(ndev
);
3771 writel(port
->vid_context
, port
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3774 writel(host_p
->vid_context
, host_p
->port_base
+ AM65_CPSW_PORT_VLAN_REG_OFFSET
);
3775 cpsw_ale_restore(common
->ale
, common
->ale_context
);
3780 static const struct dev_pm_ops am65_cpsw_nuss_dev_pm_ops
= {
3781 SYSTEM_SLEEP_PM_OPS(am65_cpsw_nuss_suspend
, am65_cpsw_nuss_resume
)
3784 static struct platform_driver am65_cpsw_nuss_driver
= {
3786 .name
= AM65_CPSW_DRV_NAME
,
3787 .of_match_table
= am65_cpsw_nuss_of_mtable
,
3788 .pm
= &am65_cpsw_nuss_dev_pm_ops
,
3790 .probe
= am65_cpsw_nuss_probe
,
3791 .remove
= am65_cpsw_nuss_remove
,
3794 module_platform_driver(am65_cpsw_nuss_driver
);
3796 MODULE_LICENSE("GPL v2");
3797 MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
3798 MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");