// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 */

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_FRAM_BASE		0x12000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200
#define AM65_CPSW_NU_FRAM_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL	0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP	0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN	0x024

#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_CTL_VLAN_AWARE	BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE		BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE	BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD		BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS	(BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET	(0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE 16
#define AM65_CPSW_NAV_SW_DATA_SIZE 16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}
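
/* Example of the SA_H/SA_L packing above (illustrative): for the address
 * 02:11:22:33:44:55, mac_hi = 0x33221102 (bytes 0-3, little endian) and
 * mac_lo = 0x00005544 (bytes 4-5), so the MAC is split across the two
 * per-port registers exactly as the hardware expects it.
 */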

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1,
		 common->pdata.quirks);
}

void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct phy_device *phy = port->slave.phy;
	u32 mac_control = 0;

	if (!phy)
		return;

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in band mode only */
			mac_control |= CPSW_SL_CTL_EXT_EN;
		if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
			mac_control |= CPSW_SL_CTL_IFCTL_A;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* RGMII speed is 100M if !CPSW_SL_CTL_GIG */

		/* rx_pause/tx_pause */
		if (port->slave.rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (port->slave.tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		am65_cpsw_qos_link_up(ndev, phy->speed);
		netif_tx_wake_all_queues(ndev);
	} else {
		int tmo;

		/* disable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

		tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
		dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
			cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
			tmo);

		cpsw_sl_ctl_reset(port->slave.mac_sl);

		am65_cpsw_qos_link_down(ndev);
		netif_tx_stop_all_queues(ndev);
	}

	phy_print_status(phy);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
				       unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (!netif_running(ndev) || !vid)
		return 0;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid,
				BIT(port->port_id) | ALE_PORT_HOST);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
					bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = netif_txq->trans_start;

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   dql_avail(&netif_txq->dql),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct sk_buff *skb)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void *swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx skb buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb),
			       buf_dma, skb_tailroom(skb));
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*((void **)swdata) = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}
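
/* Note on the push path above: the skb pointer is stashed in the
 * descriptor's software data area (swdata), so the completion and
 * cleanup handlers can recover it with cppi5_hdesc_get_swdata() alone,
 * while the descriptor itself travels through the RXFDQ ring by DMA
 * address.
 */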

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Port FIFOs work in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packets go to Enet FIFO 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}
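
/* P0_Rx_Pri_Map holds one nibble per packet priority (7..0): the reset
 * value 0x76543210 maps priority N to FIFO N, while the 0x0 used in
 * round-robin mode directs every priority to FIFO 0.
 */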

static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
				      netdev_features_t features)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	int port_idx, i, ret;
	struct sk_buff *skb;
	u32 val, port_mask;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	/* en rx checksum offload */
	writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistic */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	if (AM65_CPSW_IS_CPSW2G(common))
		cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
				     ALE_PORT_NOLEARN, 1);
	/* switch to vlan aware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	for (i = 0; i < common->rx_chns.descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL,
						  AM65_CPSW_MAX_PACKET_SIZE,
						  GFP_KERNEL);
		if (!skb) {
			dev_err(common->dev, "cannot allocate skb\n");
			return -ENOMEM;
		}

		ret = am65_cpsw_nuss_rx_push(common, skb);
		if (ret < 0) {
			dev_err(common->dev,
				"cannot submit skb to channel rx, error %d\n",
				ret);
			kfree_skb(skb);
			return ret;
		}
		kmemleak_not_leak(skb);
	}
	k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);

	for (i = 0; i < common->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
		if (ret)
			return ret;
		napi_enable(&common->tx_chns[i].napi_tx);
	}

	napi_enable(&common->napi_rx);

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);

static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++)
		napi_disable(&common->tx_chns[i].napi_tx);

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
					  &common->tx_chns[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
	}

	k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
	napi_disable(&common->napi_rx);

	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
		k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
					  &common->rx_chns,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (port->slave.phy)
		phy_stop(port->slave.phy);

	netif_tx_stop_all_queues(ndev);

	if (port->slave.phy) {
		phy_disconnect(port->slave.phy);
		port->slave.phy = NULL;
	}

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
{
	struct am65_cpsw_port *port = arg;

	if (!vdev)
		return 0;

	return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret, i;
	u32 port_mask;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		return ret;
	}

	for (i = 0; i < common->tx_ch_num; i++)
		netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));

	ret = am65_cpsw_nuss_common_open(common, ndev->features);
	if (ret)
		return ret;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);

	if (port->slave.mac_only) {
		/* enable mac-only mode on port */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY, 1);
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_NOLEARN, 1);
	}

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, ALE_SECURE, 0);
	cpsw_ale_add_mcast(common->ale, ndev->broadcast,
			   port_mask, 0, 0, ALE_MCAST_FWD_2);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
			       port->slave.phy_if);
	if (ret)
		goto error_cleanup;

	if (port->slave.phy_node) {
		port->slave.phy = of_phy_connect(ndev,
						 port->slave.phy_node,
						 &am65_cpsw_nuss_adjust_link,
						 0, port->slave.phy_if);
		if (!port->slave.phy) {
			dev_err(common->dev, "phy %pOF not found on slave %d\n",
				port->slave.phy_node,
				port->port_id);
			ret = -ENODEV;
			goto error_cleanup;
		}
	}

	/* restore vlan configurations */
	vlan_for_each(ndev, cpsw_restore_vlans, port);

	phy_attached_info(port->slave.phy);
	phy_start(port->slave.phy);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);

	dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	ns = ((u64)psdata[1] << 32) | psdata[0];

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packets checksum.
	 * csum information is provided in the psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicate IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
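
/* Example csum_info decoding: a good, non-fragmented IPv4 TCP packet
 * arrives with AM65_CPSW_RX_PSD_IPV4_VALID set, CSUM_ERR and
 * IS_FRAGMENT clear, and CSUM_ADD == 0xFFFF, which the logic above
 * turns into CHECKSUM_UNNECESSARY.
 */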

static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
				     u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	struct net_device *ndev;
	void **swdata;
	u32 *psdata;
	int ret = 0;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* add RX timestamp */
	if (port->rx_ts_enabled)
		am65_cpsw_nuss_rx_ts(skb, psdata);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
	if (new_skb) {
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		am65_cpsw_nuss_rx_csum(skb, csum_info);
		napi_gro_receive(&common->napi_rx, skb);

		ndev_priv = netdev_priv(ndev);
		stats = this_cpu_ptr(ndev_priv->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_dropped++;
		return 0;
	}

	ret = am65_cpsw_nuss_rx_push(common, new_skb);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
	int flow = AM65_CPSW_MAX_RX_FLOWS;
	int cur_budget, ret;
	int num_rx = 0;

	/* process every flow */
	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = am65_cpsw_nuss_rx_packets(common, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
		enable_irq(common->rx_chns.irq);

	return num_rx;
}
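
/* IRQ/NAPI handshake: the hard IRQ handler calls disable_irq_nosync()
 * before napi_schedule(), so the line stays masked for the whole poll
 * and is re-enabled here only after napi_complete_done() accepted the
 * completion, which avoids a lost-wakeup race with newly arrived
 * packets.
 */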

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct device *dev,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);

	dma_unmap_single(dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);

		dma_unmap_page(dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);

	dev_kfree_skb_any(skb);
}

static struct sk_buff *
am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
			       dma_addr_t desc_dma)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_tx;
	struct net_device *ndev;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
					     desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);

	ndev = skb->dev;

	am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);

	ndev_priv = netdev_priv(ndev);
	stats = this_cpu_ptr(ndev_priv->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	return skb;
}

static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
				   struct netdev_queue *netif_txq)
{
	if (netif_tx_queue_stopped(netif_txq)) {
		/* Check whether the queue is stopped due to stalled
		 * tx dma, if the queue is stopped then wake the queue
		 * as we have free desc for tx
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
}
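
/* The avail >= MAX_SKB_FRAGS test above mirrors the stop condition in
 * am65_cpsw_nuss_ndo_slave_xmit(): a queue is only woken when a
 * worst-case fragmented skb is guaranteed a full descriptor chain, and
 * the __netif_tx_lock() pairs with the xmit-side stop/re-check.
 */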

static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget)
{
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		spin_lock(&tx_chn->lock);
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		spin_unlock(&tx_chn->lock);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			break;
		}

		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
		total_bytes = skb->len;
		ndev = skb->dev;
		napi_consume_skb(skb, budget);
		num_tx++;

		netif_txq = netdev_get_tx_queue(ndev, chn);

		netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

		am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
	}

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
					      int chn, unsigned int budget)
{
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			break;
		}

		skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);

		ndev = skb->dev;
		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);

	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	int num_tx;

	if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
		num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
	else
		num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);

	num_tx = min(num_tx, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		enable_irq(tx_chn->irq);
	}

	return num_tx;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_common *common = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&common->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	/* SKB TX timestamp */
	if (port->tx_ts_enabled)
		am65_cpts_prep_tx_timestamp(common->cpts, skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
	cppi5_hdesc_set_pkttype(first_desc, 0x7);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW numerates bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	if (AM65_CPSW_IS_CPSW2G(common)) {
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	} else {
		spin_lock_bh(&tx_chn->lock);
		ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
		spin_unlock_bh(&tx_chn->lock);
	}
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform bql */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for smp */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
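
/* BQL accounting in the xmit path above: netdev_tx_sent_queue() is
 * called before the push so the completion path never sees bytes it
 * did not know about; on a failed push the same amount is immediately
 * returned via netdev_tx_completed_queue(netif_txq, 1, pkt_len).
 */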

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, addr);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	/* TX HW timestamp */
	switch (cfg.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->rx_ts_enabled = false;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		port->rx_ts_enabled = true;
		cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);

	/* cfg TX timestamp */
	seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
		  AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;

	ts_vlan_ltype = ETH_P_8021Q;

	ts_ctrl_ltype2 = ETH_P_1588 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
			 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;

	ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
		  AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;

	if (port->tx_ts_enabled)
		ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
			   AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;

	writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
	writel(ts_vlan_ltype, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
	writel(ts_ctrl_ltype2, port->port_base +
	       AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
	writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);

	/* en/dis RX timestamp */
	am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
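
/* Resulting TS_SEQ_LTYPE value, as a worked example: with
 * AM65_CPSW_TS_SEQ_ID_OFFSET = 0x1e and ETH_P_1588 = 0x88F7, the
 * register is written with (0x1e << 16) | 0x88F7 = 0x001E88F7, i.e.
 * the PTP sequence id sits at byte offset 30 and LTYPE1 matches 1588
 * frames.
 */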

static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
				       struct ifreq *ifr)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct hwtstamp_config cfg;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = port->tx_ts_enabled ?
		      HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = port->rx_ts_enabled ?
			HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_set(ndev, req);
	case SIOCGHWTSTAMP:
		return am65_cpsw_nuss_hwtstamp_get(ndev, req);
	}

	if (!port->slave.phy)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(port->slave.phy, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct am65_cpsw_ndev_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors  = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
	.ndo_get_stats64	= am65_cpsw_nuss_ndo_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
	.ndo_do_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
	.ndo_setup_tc		= am65_cpsw_qos_ndo_setup_tc,
};

static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *common = port->common;

	if (!port->disabled)
		return;

	cpsw_ale_control_set(common->ale, port->port_id,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_reset(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);
}

static void am65_cpsw_nuss_free_tx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	int i;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i;

	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (tx_chn->irq)
			devm_free_irq(dev, tx_chn->irq, tx_chn);

		netif_napi_del(&tx_chn->napi_tx);

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		spin_lock_init(&tx_chn->lock);
		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
					    "Failed to request tx dma channel\n");
			goto err;
		}

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq <= 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
}

static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	u32 hdesc_size;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
				    "Failed to request rx dma channel\n");
		goto err;
	}

	common->rx_flow_id_base =
			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
			goto err;
		}
		if (!i)
			fdqring_id =
				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								i);

		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);

		if (rx_chn->irq <= 0) {
			dev_err(dev, "Failed to get rx dma irq %d\n",
				rx_chn->irq);
			ret = -ENXIO;
			goto err;
		}
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
		return i;
	}

	return ret;
}
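
/* FDQ sharing above: only flow 0 allocates a free-descriptor queue
 * (K3_RINGACC_RING_ID_ANY); its ring id is then captured once via
 * k3_udma_glue_rx_flow_get_fdq_id() and reused as ring_rxfdq0_id for
 * every remaining flow, which the K3_RINGACC_RING_SHARED flag permits.
 */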

static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);

	host_p->common = common;
	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;

	return 0;
}

static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
					   int slave, u8 *mac_addr)
{
	u32 mac_lo, mac_hi, offset;
	struct regmap *syscon;
	int ret;

	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
					 &offset);
	if (ret)
		return ret;

	regmap_read(syscon, offset, &mac_lo);
	regmap_read(syscon, offset + 4, &mac_hi);

	mac_addr[0] = (mac_hi >> 8) & 0xff;
	mac_addr[1] = mac_hi & 0xff;
	mac_addr[2] = (mac_lo >> 24) & 0xff;
	mac_addr[3] = (mac_lo >> 16) & 0xff;
	mac_addr[4] = (mac_lo >> 8) & 0xff;
	mac_addr[5] = mac_lo & 0xff;

	return 0;
}
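
/* Efuse byte order, illustrated: if the fuse words read back as
 * mac_hi = 0x00000211 and mac_lo = 0x22334455, the bytes assemble to
 * 02:11:22:33:44:55 - mac_hi carries the two most significant octets,
 * mac_lo the remaining four.
 */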

static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct device_node *node;
	struct am65_cpts *cpts;
	void __iomem *reg_base;

	if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
		return 0;

	node = of_get_child_by_name(dev->of_node, "cpts");
	if (!node) {
		dev_err(dev, "%s cpts not found\n", __func__);
		return -ENOENT;
	}

	reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
	cpts = am65_cpts_create(dev, reg_base, node);
	if (IS_ERR(cpts)) {
		int ret = PTR_ERR(cpts);

		if (ret == -EOPNOTSUPP) {
			dev_info(dev, "cpts disabled\n");
			return 0;
		}

		dev_err(dev, "cpts create err %d\n", ret);
		return ret;
	}
	common->cpts = cpts;
	/* Forbid PM runtime if CPTS is running.
	 * K3 CPSWxG modules may completely lose context during ON->OFF
	 * transitions depending on integration.
	 * AM65x/J721E MCU CPSW2G: false
	 * J721E MAIN_CPSW9G: true
	 */
	pm_runtime_forbid(dev);

	return 0;
}

static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
	struct device_node *node, *port_np;
	struct device *dev = common->dev;
	int ret;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;

	for_each_child_of_node(node, port_np) {
		struct am65_cpsw_port *port;
		const void *mac_addr;
		u32 port_id;

		/* it is not a slave port node, continue */
		if (strcmp(port_np->name, "port"))
			continue;

		ret = of_property_read_u32(port_np, "reg", &port_id);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				port_np, ret);
			return ret;
		}

		if (!port_id || port_id > common->port_num) {
			dev_err(dev, "%pOF has invalid port_id %u %s\n",
				port_np, port_id, port_np->name);
			return -EINVAL;
		}

		port = am65_common_get_port(common, port_id);
		port->port_id = port_id;
		port->common = common;
		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
		port->name = of_get_property(port_np, "label", NULL);
		port->fetch_ram_base =
				common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
				(AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));

		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
		if (IS_ERR(port->slave.mac_sl))
			return PTR_ERR(port->slave.mac_sl);

		port->disabled = !of_device_is_available(port_np);
		if (port->disabled) {
			common->disabled_ports_mask |= BIT(port->port_id);
			continue;
		}

		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
		if (IS_ERR(port->slave.ifphy)) {
			ret = PTR_ERR(port->slave.ifphy);
			dev_err(dev, "%pOF error retrieving port phy: %d\n",
				port_np, ret);
			return ret;
		}

		port->slave.mac_only =
				of_property_read_bool(port_np, "ti,mac-only");

		/* get phy/link info */
		if (of_phy_is_fixed_link(port_np)) {
			ret = of_phy_register_fixed_link(port_np);
			if (ret)
				return dev_err_probe(dev, ret,
						     "failed to register fixed-link phy %pOF\n",
						     port_np);
			port->slave.phy_node = of_node_get(port_np);
		} else {
			port->slave.phy_node =
				of_parse_phandle(port_np, "phy-handle", 0);
		}

		if (!port->slave.phy_node) {
			dev_err(dev,
				"slave[%d] no phy found\n", port_id);
			return -ENODEV;
		}

		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
		if (ret) {
			dev_err(dev, "%pOF read phy-mode err %d\n",
				port_np, ret);
			return ret;
		}

		mac_addr = of_get_mac_address(port_np);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(port->slave.mac_addr, mac_addr);
		} else if (am65_cpsw_am654_get_efuse_macid(port_np,
							   port->port_id,
							   port->slave.mac_addr) ||
			   !is_valid_ether_addr(port->slave.mac_addr)) {
			random_ether_addr(port->slave.mac_addr);
			dev_err(dev, "Use random MAC address\n");
		}
	}
	of_node_put(node);

	/* is there at least one ext.port */
	if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
		dev_err(dev, "No Ext. port are available\n");
		return -ENODEV;
	}

	return 0;
}

static void am65_cpsw_pcpu_stats_free(void *data)
{
	struct am65_cpsw_ndev_stats __percpu *stats = data;

	free_percpu(stats);
}

static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret;

	port = &common->ports[port_idx];

	if (port->disabled)
		return 0;

	/* alloc netdev */
	port->ndev = devm_alloc_etherdev_mqs(common->dev,
					     sizeof(struct am65_cpsw_ndev_priv),
					     AM65_CPSW_MAX_TX_QUEUES,
					     AM65_CPSW_MAX_RX_QUEUES);
	if (!port->ndev) {
		dev_err(dev, "error allocating slave net_device %u\n",
			port->port_id);
		return -ENOMEM;
	}

	ndev_priv = netdev_priv(port->ndev);
	ndev_priv->port = port;
	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
	SET_NETDEV_DEV(port->ndev, dev);

	ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);

	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
	port->ndev->hw_features = NETIF_F_SG |
				  NETIF_F_RXCSUM |
				  NETIF_F_HW_CSUM |
				  NETIF_F_HW_TC;
	port->ndev->features = port->ndev->hw_features |
			       NETIF_F_HW_VLAN_CTAG_FILTER;
	port->ndev->vlan_features |= NETIF_F_SG;
	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;

	/* Disable TX checksum offload by default due to HW bug */
	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
		port->ndev->features &= ~NETIF_F_HW_CSUM;

	ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
	if (!ndev_priv->stats)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
				       ndev_priv->stats);
	if (ret)
		dev_err(dev, "failed to add percpu stat free action %d\n", ret);

	if (!common->dma_ndev)
		common->dma_ndev = port->ndev;

	return ret;
}

static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
{
	int ret;
	int i;

	for (i = 0; i < common->port_num; i++) {
		ret = am65_cpsw_nuss_init_port_ndev(common, i);
		if (ret)
			return ret;
	}

	netif_napi_add(common->dma_ndev, &common->napi_rx,
		       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);

	return 0;
}

static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i, ret = 0;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
				  am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);

		ret = devm_request_irq(dev, tx_chn->irq,
				       am65_cpsw_nuss_tx_irq,
				       IRQF_TRIGGER_HIGH,
				       tx_chn->tx_chn_name, tx_chn);
		if (ret) {
			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
				tx_chn->id, tx_chn->irq, ret);
			goto err;
		}
	}

err:
	return ret;
}

static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		if (port->ndev)
			unregister_netdev(port->ndev);
	}
}

static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret = 0, i;

	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
	if (ret)
		return ret;

	ret = devm_request_irq(dev, common->rx_chns.irq,
			       am65_cpsw_nuss_rx_irq,
			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
	if (ret) {
		dev_err(dev, "failure requesting rx irq %u, %d\n",
			common->rx_chns.irq, ret);
		return ret;
	}

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];

		if (!port->ndev)
			continue;

		ret = register_netdev(port->ndev);
		if (ret) {
			dev_err(dev, "error registering slave net device%i %d\n",
				i, ret);
			goto err_cleanup_ndev;
		}
	}

	/* can't auto unregister ndev using devm_add_action() due to
	 * devres release sequence in DD core for DMA
	 */
	return 0;

err_cleanup_ndev:
	am65_cpsw_nuss_cleanup_ndev(common);
	return ret;
}

int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
{
	int ret;

	common->tx_ch_num = num_tx;
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		return ret;

	return am65_cpsw_nuss_ndev_add_tx_napi(common);
}

struct am65_cpsw_soc_pdata {
	u32	quirks_dis;
};

static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
	.quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
};

static const struct soc_device_attribute am65_cpsw_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR2.0",
	  .data = &am65x_soc_sr2_0
	},
	{/* sentinel */}
};

static const struct am65_cpsw_pdata am65x_sr1_0 = {
	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
	.ale_dev_id = "am65x-cpsw2g",
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};

static const struct am65_cpsw_pdata j721e_pdata = {
	.quirks = 0,
	.ale_dev_id = "am65x-cpsw2g",
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};

static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);

static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
{
	const struct soc_device_attribute *soc;

	soc = soc_device_match(am65_cpsw_socinfo);
	if (soc && soc->data) {
		const struct am65_cpsw_soc_pdata *socdata = soc->data;

		/* disable quirks */
		common->pdata.quirks &= ~socdata->quirks_dis;
	}
}

static int am65_cpsw_nuss_probe(struct platform_device *pdev)
{
	struct cpsw_ale_params ale_params = { 0 };
	const struct of_device_id *of_id;
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	struct device_node *node;
	struct resource *res;
	struct clk *clk;
	int ret, i;

	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
	if (!common)
		return -ENOMEM;
	common->dev = dev;

	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
	if (!of_id)
		return -EINVAL;
	common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;

	am65_cpsw_nuss_apply_socinfo(common);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(common->ss_base))
		return PTR_ERR(common->ss_base);
	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;
	common->port_num = of_get_child_count(node);
	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
		return -ENOENT;
	of_node_put(node);

	common->rx_flow_id_base = -1;
	init_completion(&common->tdown_complete);
	common->tx_ch_num = 1;
	common->pf_p0_rx_ptype_rrobin = false;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "error setting dma mask: %d\n", ret);
		return ret;
	}

	common->ports = devm_kcalloc(dev, common->port_num,
				     sizeof(*common->ports),
				     GFP_KERNEL);
	if (!common->ports)
		return -ENOMEM;

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
	common->bus_freq = clk_get_rate(clk);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	node = of_get_child_by_name(dev->of_node, "mdio");
	if (!node) {
		dev_warn(dev, "MDIO node not found\n");
	} else if (of_device_is_available(node)) {
		struct platform_device *mdio_pdev;

		mdio_pdev = of_platform_device_create(node, NULL, dev);
		if (!mdio_pdev) {
			ret = -ENODEV;
			goto err_pm_clear;
		}

		common->mdio_dev = &mdio_pdev->dev;
	}
	of_node_put(node);

	am65_cpsw_nuss_get_ver(common);

	/* init tx channels */
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		goto err_of_clear;
	ret = am65_cpsw_nuss_init_rx_chns(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_host_p(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_slave_ports(common);
	if (ret)
		goto err_of_clear;

	/* init common data */
	ale_params.dev = dev;
	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
	ale_params.ale_ports = common->port_num + 1;
	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
	ale_params.dev_id = common->pdata.ale_dev_id;
	ale_params.bus_freq = common->bus_freq;

	common->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(common->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		ret = PTR_ERR(common->ale);
		goto err_of_clear;
	}

	ret = am65_cpsw_init_cpts(common);
	if (ret)
		goto err_of_clear;

	/* init ports */
	for (i = 0; i < common->port_num; i++)
		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);

	dev_set_drvdata(dev, common);

	ret = am65_cpsw_nuss_init_ndevs(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_register_ndevs(common);
	if (ret)
		goto err_of_clear;

	pm_runtime_put(dev);
	return 0;

err_of_clear:
	of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}

static int am65_cpsw_nuss_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	int ret;

	common = dev_get_drvdata(dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	/* must unregister ndevs here because DD release_driver routine calls
	 * dma_deconfigure(dev) before devres_release_all(dev)
	 */
	am65_cpsw_nuss_cleanup_ndev(common);

	of_platform_device_destroy(common->mdio_dev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver am65_cpsw_nuss_driver = {
	.driver = {
		.name	= AM65_CPSW_DRV_NAME,
		.of_match_table = am65_cpsw_nuss_of_mtable,
	},
	.probe = am65_cpsw_nuss_probe,
	.remove = am65_cpsw_nuss_remove,
};

module_platform_driver(am65_cpsw_nuss_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");