1 // SPDX-License-Identifier: GPL-2.0
2 /* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
4 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
6 */
8 #include <linux/clk.h>
9 #include <linux/etherdevice.h>
10 #include <linux/if_vlan.h>
11 #include <linux/interrupt.h>
12 #include <linux/kernel.h>
13 #include <linux/kmemleak.h>
14 #include <linux/module.h>
15 #include <linux/netdevice.h>
16 #include <linux/net_tstamp.h>
17 #include <linux/of.h>
18 #include <linux/of_mdio.h>
19 #include <linux/of_net.h>
20 #include <linux/of_device.h>
21 #include <linux/phy.h>
22 #include <linux/phy/phy.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/regmap.h>
26 #include <linux/mfd/syscon.h>
27 #include <linux/sys_soc.h>
28 #include <linux/dma/ti-cppi5.h>
29 #include <linux/dma/k3-udma-glue.h>
31 #include "cpsw_ale.h"
32 #include "cpsw_sl.h"
33 #include "am65-cpsw-nuss.h"
34 #include "k3-cppi-desc-pool.h"
35 #include "am65-cpts.h"
37 #define AM65_CPSW_SS_BASE 0x0
38 #define AM65_CPSW_SGMII_BASE 0x100
39 #define AM65_CPSW_XGMII_BASE 0x2100
40 #define AM65_CPSW_CPSW_NU_BASE 0x20000
41 #define AM65_CPSW_NU_PORTS_BASE 0x1000
42 #define AM65_CPSW_NU_FRAM_BASE 0x12000
43 #define AM65_CPSW_NU_STATS_BASE 0x1a000
44 #define AM65_CPSW_NU_ALE_BASE 0x1e000
45 #define AM65_CPSW_NU_CPTS_BASE 0x1d000
47 #define AM65_CPSW_NU_PORTS_OFFSET 0x1000
48 #define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
49 #define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
51 #define AM65_CPSW_MAX_PORTS 8
53 #define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
54 #define AM65_CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
56 #define AM65_CPSW_REG_CTL 0x004
57 #define AM65_CPSW_REG_STAT_PORT_EN 0x014
58 #define AM65_CPSW_REG_PTYPE 0x018
60 #define AM65_CPSW_P0_REG_CTL 0x004
61 #define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008
63 #define AM65_CPSW_PORT_REG_PRI_CTL 0x01c
64 #define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
65 #define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
67 #define AM65_CPSW_PORTN_REG_SA_L 0x308
68 #define AM65_CPSW_PORTN_REG_SA_H 0x30c
69 #define AM65_CPSW_PORTN_REG_TS_CTL 0x310
70 #define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314
71 #define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
72 #define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
74 #define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
75 #define AM65_CPSW_CTL_P0_ENABLE BIT(2)
76 #define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
77 #define AM65_CPSW_CTL_P0_RX_PAD BIT(14)
79 /* AM65_CPSW_P0_REG_CTL */
80 #define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
82 /* AM65_CPSW_PORT_REG_PRI_CTL */
83 #define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
85 /* AM65_CPSW_PN_TS_CTL register fields */
86 #define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
87 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
88 #define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6)
89 #define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7)
90 #define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10)
91 #define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
92 #define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
94 /* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
95 #define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
97 /* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
98 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16)
99 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17)
100 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18)
101 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19)
102 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20)
103 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21)
104 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22)
105 #define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
107 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
108 #define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
110 #define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
112 #define AM65_CPSW_TS_TX_ANX_ALL_EN \
113 (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \
114 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
115 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
117 #define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
118 /* Number of TX/RX descriptors */
119 #define AM65_CPSW_MAX_TX_DESC 500
120 #define AM65_CPSW_MAX_RX_DESC 500
122 #define AM65_CPSW_NAV_PS_DATA_SIZE 16
123 #define AM65_CPSW_NAV_SW_DATA_SIZE 16
125 #define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
126 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
127 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
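/* Per-port source-address registers hold the MAC address split across two
 * 32-bit writes: SA_H takes bytes 0-3 (byte 0 in bits 7:0) and SA_L takes
 * bytes 4-5, as programmed by am65_cpsw_port_set_sl_mac() below.
 * Illustrative example only: 02:11:22:33:44:55 is written as
 * SA_H = 0x33221102 and SA_L = 0x00005544.
 */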
129 static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
130 const u8 *dev_addr)
132 u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
133 (dev_addr[2] << 16) | (dev_addr[3] << 24);
134 u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
136 writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
137 writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
140 static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
142 cpsw_sl_reset(port->slave.mac_sl, 100);
143 /* Max length register has to be restored after MAC SL reset */
144 writel(AM65_CPSW_MAX_PACKET_SIZE,
145 port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
148 static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
150 common->nuss_ver = readl(common->ss_base);
151 common->cpsw_ver = readl(common->cpsw_base);
152 dev_info(common->dev,
153 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
154 common->nuss_ver,
155 common->cpsw_ver,
156 common->port_num + 1,
157 common->pdata.quirks);
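/* phylib adjust_link callback: translate the negotiated PHY state (speed,
 * duplex, pause) into CPSW MAC SL control bits, enable or disable forwarding
 * on the matching ALE port, and wake/stop the netdev TX queues accordingly.
 */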
160 void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
162 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
163 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
164 struct phy_device *phy = port->slave.phy;
165 u32 mac_control = 0;
167 if (!phy)
168 return;
170 if (phy->link) {
171 mac_control = CPSW_SL_CTL_GMII_EN;
173 if (phy->speed == 1000)
174 mac_control |= CPSW_SL_CTL_GIG;
175 if (phy->speed == 10 && phy_interface_is_rgmii(phy))
176 /* Can be used with in-band mode only */
177 mac_control |= CPSW_SL_CTL_EXT_EN;
178 if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
179 mac_control |= CPSW_SL_CTL_IFCTL_A;
180 if (phy->duplex)
181 mac_control |= CPSW_SL_CTL_FULLDUPLEX;
183 /* RGMII speed is 100M if !CPSW_SL_CTL_GIG */
185 /* rx_pause/tx_pause */
186 if (port->slave.rx_pause)
187 mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
189 if (port->slave.tx_pause)
190 mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
192 cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
194 /* enable forwarding */
195 cpsw_ale_control_set(common->ale, port->port_id,
196 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
198 am65_cpsw_qos_link_up(ndev, phy->speed);
199 netif_tx_wake_all_queues(ndev);
200 } else {
201 int tmo;
203 /* disable forwarding */
204 cpsw_ale_control_set(common->ale, port->port_id,
205 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
207 cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
209 tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
210 dev_dbg(common->dev, "donw msc_sl %08x tmo %d\n",
211 cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
212 tmo);
214 cpsw_sl_ctl_reset(port->slave.mac_sl);
216 am65_cpsw_qos_link_down(ndev);
217 netif_tx_stop_all_queues(ndev);
220 phy_print_status(phy);
223 static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
224 __be16 proto, u16 vid)
226 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
227 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
228 u32 port_mask, unreg_mcast = 0;
229 int ret;
231 if (!netif_running(ndev) || !vid)
232 return 0;
234 ret = pm_runtime_get_sync(common->dev);
235 if (ret < 0) {
236 pm_runtime_put_noidle(common->dev);
237 return ret;
240 port_mask = BIT(port->port_id) | ALE_PORT_HOST;
241 if (!vid)
242 unreg_mcast = port_mask;
243 dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
244 ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
245 unreg_mcast, port_mask, 0);
247 pm_runtime_put(common->dev);
248 return ret;
251 static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
252 __be16 proto, u16 vid)
254 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
255 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
256 int ret;
258 if (!netif_running(ndev) || !vid)
259 return 0;
261 ret = pm_runtime_get_sync(common->dev);
262 if (ret < 0) {
263 pm_runtime_put_noidle(common->dev);
264 return ret;
267 dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
268 ret = cpsw_ale_del_vlan(common->ale, vid,
269 BIT(port->port_id) | ALE_PORT_HOST);
271 pm_runtime_put(common->dev);
272 return ret;
275 static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
276 bool promisc)
278 struct am65_cpsw_common *common = port->common;
280 if (promisc) {
281 /* Enable promiscuous mode */
282 cpsw_ale_control_set(common->ale, port->port_id,
283 ALE_PORT_MACONLY_CAF, 1);
284 dev_dbg(common->dev, "promisc enabled\n");
285 } else {
286 /* Disable promiscuous mode */
287 cpsw_ale_control_set(common->ale, port->port_id,
288 ALE_PORT_MACONLY_CAF, 0);
289 dev_dbg(common->dev, "promisc disabled\n");
293 static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
295 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
296 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
297 u32 port_mask;
298 bool promisc;
300 promisc = !!(ndev->flags & IFF_PROMISC);
301 am65_cpsw_slave_set_promisc(port, promisc);
303 if (promisc)
304 return;
306 /* Restore allmulti on vlans if necessary */
307 cpsw_ale_set_allmulti(common->ale,
308 ndev->flags & IFF_ALLMULTI, port->port_id);
310 port_mask = ALE_PORT_HOST;
311 /* Clear all mcast from ALE */
312 cpsw_ale_flush_multicast(common->ale, port_mask, -1);
314 if (!netdev_mc_empty(ndev)) {
315 struct netdev_hw_addr *ha;
317 /* program multicast address list into ALE register */
318 netdev_for_each_mc_addr(ha, ndev) {
319 cpsw_ale_add_mcast(common->ale, ha->addr,
320 port_mask, 0, 0, 0);
325 static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
326 unsigned int txqueue)
328 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
329 struct am65_cpsw_tx_chn *tx_chn;
330 struct netdev_queue *netif_txq;
331 unsigned long trans_start;
333 netif_txq = netdev_get_tx_queue(ndev, txqueue);
334 tx_chn = &common->tx_chns[txqueue];
335 trans_start = netif_txq->trans_start;
337 netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
338 txqueue,
339 netif_tx_queue_stopped(netif_txq),
340 jiffies_to_msecs(jiffies - trans_start),
341 dql_avail(&netif_txq->dql),
342 k3_cppi_desc_pool_avail(tx_chn->desc_pool));
344 if (netif_tx_queue_stopped(netif_txq)) {
345 /* try recover if stopped by us */
346 txq_trans_update(netif_txq);
347 netif_tx_wake_queue(netif_txq);
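/* RX refill helper: am65_cpsw_nuss_rx_push() below allocates a host
 * descriptor from the pool, DMA-maps the skb data area for reception, stashes
 * the skb pointer in the descriptor software data (swdata) so the completion
 * path can recover it, and pushes the descriptor to RX free-descriptor
 * queue of flow 0.
 */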
351 static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
352 struct sk_buff *skb)
354 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
355 struct cppi5_host_desc_t *desc_rx;
356 struct device *dev = common->dev;
357 u32 pkt_len = skb_tailroom(skb);
358 dma_addr_t desc_dma;
359 dma_addr_t buf_dma;
360 void *swdata;
362 desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
363 if (!desc_rx) {
364 dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
365 return -ENOMEM;
367 desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
369 buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
370 if (unlikely(dma_mapping_error(dev, buf_dma))) {
371 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
372 dev_err(dev, "Failed to map rx skb buffer\n");
373 return -EINVAL;
376 cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
377 AM65_CPSW_NAV_PS_DATA_SIZE);
378 cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
379 swdata = cppi5_hdesc_get_swdata(desc_rx);
380 *((void **)swdata) = skb;
382 return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
385 void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
387 struct am65_cpsw_host *host_p = am65_common_get_host(common);
388 u32 val, pri_map;
390 /* P0 set Receive Priority Type */
391 val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
393 if (common->pf_p0_rx_ptype_rrobin) {
394 val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
395 /* Enet port FIFOs work in fixed priority mode only, so
396 * reset P0_Rx_Pri_Map so all packets will go to Enet FIFO 0.
397 */
398 pri_map = 0x0;
399 } else {
400 val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
401 /* restore P0_Rx_Pri_Map */
402 pri_map = 0x76543210;
405 writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
406 writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
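/* Common bring-up, run only when the first slave netdev is opened
 * (usage_count == 0): program the main control register (P0 enable, P0 TX
 * CRC remove, VLAN aware, P0 RX pad), RX max length, RX flow-id base and
 * port 0 checksum offload; enable per-port statistics; start the ALE and
 * install the default VLAN; prefill every RX descriptor with an skb; then
 * enable the RX/TX DMA channels and their NAPI contexts.
 */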
409 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
410 netdev_features_t features)
412 struct am65_cpsw_host *host_p = am65_common_get_host(common);
413 int port_idx, i, ret;
414 struct sk_buff *skb;
415 u32 val, port_mask;
417 if (common->usage_count)
418 return 0;
420 /* Control register */
421 writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
422 AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
423 common->cpsw_base + AM65_CPSW_REG_CTL);
424 /* Max length register */
425 writel(AM65_CPSW_MAX_PACKET_SIZE,
426 host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
427 /* set base flow_id */
428 writel(common->rx_flow_id_base,
429 host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
430 /* en tx crc offload */
431 writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
433 am65_cpsw_nuss_set_p0_ptype(common);
435 /* enable statistic */
436 val = BIT(HOST_PORT_NUM);
437 for (port_idx = 0; port_idx < common->port_num; port_idx++) {
438 struct am65_cpsw_port *port = &common->ports[port_idx];
440 if (!port->disabled)
441 val |= BIT(port->port_id);
443 writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
445 /* disable priority elevation */
446 writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
448 cpsw_ale_start(common->ale);
450 /* limit to one RX flow only */
451 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
452 ALE_DEFAULT_THREAD_ID, 0);
453 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
454 ALE_DEFAULT_THREAD_ENABLE, 1);
455 if (AM65_CPSW_IS_CPSW2G(common))
456 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
457 ALE_PORT_NOLEARN, 1);
458 /* switch to vlan unaware mode */
459 cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
460 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
461 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
463 /* default vlan cfg: create mask based on enabled ports */
464 port_mask = GENMASK(common->port_num, 0) &
465 ~common->disabled_ports_mask;
467 cpsw_ale_add_vlan(common->ale, 0, port_mask,
468 port_mask, port_mask,
469 port_mask & ~ALE_PORT_HOST);
471 for (i = 0; i < common->rx_chns.descs_num; i++) {
472 skb = __netdev_alloc_skb_ip_align(NULL,
473 AM65_CPSW_MAX_PACKET_SIZE,
474 GFP_KERNEL);
475 if (!skb) {
476 dev_err(common->dev, "cannot allocate skb\n");
477 return -ENOMEM;
480 ret = am65_cpsw_nuss_rx_push(common, skb);
481 if (ret < 0) {
482 dev_err(common->dev,
483 "cannot submit skb to channel rx, error %d\n",
484 ret);
485 kfree_skb(skb);
486 return ret;
488 kmemleak_not_leak(skb);
490 k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
492 for (i = 0; i < common->tx_ch_num; i++) {
493 ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
494 if (ret)
495 return ret;
496 napi_enable(&common->tx_chns[i].napi_tx);
499 napi_enable(&common->napi_rx);
501 dev_dbg(common->dev, "cpsw_nuss started\n");
502 return 0;
505 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
506 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
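/* Common teardown, run only when the last user closes (usage_count == 1):
 * disable forwarding on the ALE host port, tear down all TX channels and wait
 * on tdown_complete (signalled when every channel's teardown marker has been
 * popped by the TX completion path), disable NAPI, reset and disable the
 * TX/RX channels, stop the ALE, and clear the control/statistics enables.
 */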
508 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
510 int i;
512 if (common->usage_count != 1)
513 return 0;
515 cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
516 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
518 /* shutdown tx channels */
519 atomic_set(&common->tdown_cnt, common->tx_ch_num);
520 /* ensure new tdown_cnt value is visible */
521 smp_mb__after_atomic();
522 reinit_completion(&common->tdown_complete);
524 for (i = 0; i < common->tx_ch_num; i++)
525 k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
527 i = wait_for_completion_timeout(&common->tdown_complete,
528 msecs_to_jiffies(1000));
529 if (!i)
530 dev_err(common->dev, "tx timeout\n");
531 for (i = 0; i < common->tx_ch_num; i++)
532 napi_disable(&common->tx_chns[i].napi_tx);
534 for (i = 0; i < common->tx_ch_num; i++) {
535 k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
536 &common->tx_chns[i],
537 am65_cpsw_nuss_tx_cleanup);
538 k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
541 k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
542 napi_disable(&common->napi_rx);
544 for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
545 k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
546 &common->rx_chns,
547 am65_cpsw_nuss_rx_cleanup, !!i);
549 k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
551 cpsw_ale_stop(common->ale);
553 writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
554 writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
556 dev_dbg(common->dev, "cpsw_nuss stopped\n");
557 return 0;
560 static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
562 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
563 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
564 int ret;
566 if (port->slave.phy)
567 phy_stop(port->slave.phy);
569 netif_tx_stop_all_queues(ndev);
571 if (port->slave.phy) {
572 phy_disconnect(port->slave.phy);
573 port->slave.phy = NULL;
576 ret = am65_cpsw_nuss_common_stop(common);
577 if (ret)
578 return ret;
580 common->usage_count--;
581 pm_runtime_put(common->dev);
582 return 0;
585 static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
587 struct am65_cpsw_port *port = arg;
589 if (!vdev)
590 return 0;
592 return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
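/* Per-port ndo_open: take a PM runtime reference, publish the real TX/RX
 * queue counts, reset BQL state, run the common bring-up, program the port
 * MAC into the SL registers and the ALE (unicast for the host, broadcast
 * multicast for port + host), reset the MAC SL, set the SerDes PHY mode via
 * phy_set_mode_ext(), connect the PHY through of_phy_connect(), restore any
 * VLAN filters and finally start the PHY.
 */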
595 static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
597 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
598 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
599 u32 port_mask;
600 int ret, i;
602 ret = pm_runtime_get_sync(common->dev);
603 if (ret < 0) {
604 pm_runtime_put_noidle(common->dev);
605 return ret;
608 /* Notify the stack of the actual queue counts. */
609 ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
610 if (ret) {
611 dev_err(common->dev, "cannot set real number of tx queues\n");
612 return ret;
615 ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
616 if (ret) {
617 dev_err(common->dev, "cannot set real number of rx queues\n");
618 return ret;
621 for (i = 0; i < common->tx_ch_num; i++)
622 netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
624 ret = am65_cpsw_nuss_common_open(common, ndev->features);
625 if (ret)
626 return ret;
628 common->usage_count++;
630 am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
632 if (port->slave.mac_only) {
633 /* enable mac-only mode on port */
634 cpsw_ale_control_set(common->ale, port->port_id,
635 ALE_PORT_MACONLY, 1);
636 cpsw_ale_control_set(common->ale, port->port_id,
637 ALE_PORT_NOLEARN, 1);
640 port_mask = BIT(port->port_id) | ALE_PORT_HOST;
641 cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
642 HOST_PORT_NUM, ALE_SECURE, 0);
643 cpsw_ale_add_mcast(common->ale, ndev->broadcast,
644 port_mask, 0, 0, ALE_MCAST_FWD_2);
646 /* mac_sl should be configured via phy-link interface */
647 am65_cpsw_sl_ctl_reset(port);
649 ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
650 port->slave.phy_if);
651 if (ret)
652 goto error_cleanup;
654 if (port->slave.phy_node) {
655 port->slave.phy = of_phy_connect(ndev,
656 port->slave.phy_node,
657 &am65_cpsw_nuss_adjust_link,
658 0, port->slave.phy_if);
659 if (!port->slave.phy) {
660 dev_err(common->dev, "phy %pOF not found on slave %d\n",
661 port->slave.phy_node,
662 port->port_id);
663 ret = -ENODEV;
664 goto error_cleanup;
668 /* restore vlan configurations */
669 vlan_for_each(ndev, cpsw_restore_vlans, port);
671 phy_attached_info(port->slave.phy);
672 phy_start(port->slave.phy);
674 return 0;
676 error_cleanup:
677 am65_cpsw_nuss_ndo_slave_stop(ndev);
678 return ret;
681 static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
683 struct am65_cpsw_rx_chn *rx_chn = data;
684 struct cppi5_host_desc_t *desc_rx;
685 struct sk_buff *skb;
686 dma_addr_t buf_dma;
687 u32 buf_dma_len;
688 void **swdata;
690 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
691 swdata = cppi5_hdesc_get_swdata(desc_rx);
692 skb = *swdata;
693 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
695 dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
696 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
698 dev_kfree_skb_any(skb);
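/* RX hardware timestamp: the descriptor protocol-specific data words carry
 * the 64-bit timestamp in nanoseconds, psdata[0] holding the low 32 bits and
 * psdata[1] the high 32 bits, converted to ktime below.
 */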
701 static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
703 struct skb_shared_hwtstamps *ssh;
704 u64 ns;
706 ns = ((u64)psdata[1] << 32) | psdata[0];
708 ssh = skb_hwtstamps(skb);
709 memset(ssh, 0, sizeof(*ssh));
710 ssh->hwtstamp = ns_to_ktime(ns);
713 /* RX psdata[2] word format - checksum information */
714 #define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
715 #define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
716 #define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
717 #define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
718 #define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
719 #define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
721 static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
723 /* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
724 * The csum information is provided in the psdata[2] word:
725 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
726 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
727 * bits - indicate an IPv4/IPv6 packet
728 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
729 * AM65_CPSW_RX_PSD_CSUM_ADD has the value 0xFFFF for non-fragmented packets
730 * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
731 */
732 skb_checksum_none_assert(skb);
734 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
735 return;
737 if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
738 AM65_CPSW_RX_PSD_IPV4_VALID)) &&
739 !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
740 /* csum for fragmented packets is unsupported */
741 if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
742 skb->ip_summed = CHECKSUM_UNNECESSARY;
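/* RX completion for one packet: pop a descriptor from the flow's completion
 * ring (bailing out on the teardown marker), recover the skb from swdata,
 * unmap the buffer, read the source port id from the descriptor tags to pick
 * the destination netdev, apply timestamp/checksum info from psdata, hand the
 * skb to GRO, and refill the free queue with a newly allocated skb; if that
 * allocation fails the old skb is recycled and the packet counted as dropped.
 */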
746 static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
747 u32 flow_idx)
749 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
750 u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
751 struct am65_cpsw_ndev_priv *ndev_priv;
752 struct am65_cpsw_ndev_stats *stats;
753 struct cppi5_host_desc_t *desc_rx;
754 struct device *dev = common->dev;
755 struct sk_buff *skb, *new_skb;
756 dma_addr_t desc_dma, buf_dma;
757 struct am65_cpsw_port *port;
758 struct net_device *ndev;
759 void **swdata;
760 u32 *psdata;
761 int ret = 0;
763 ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
764 if (ret) {
765 if (ret != -ENODATA)
766 dev_err(dev, "RX: pop chn fail %d\n", ret);
767 return ret;
770 if (cppi5_desc_is_tdcm(desc_dma)) {
771 dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
772 return 0;
775 desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
776 dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
777 __func__, flow_idx, &desc_dma);
779 swdata = cppi5_hdesc_get_swdata(desc_rx);
780 skb = *swdata;
781 cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
782 pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
783 cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
784 dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
785 port = am65_common_get_port(common, port_id);
786 ndev = port->ndev;
787 skb->dev = ndev;
789 psdata = cppi5_hdesc_get_psdata(desc_rx);
790 /* add RX timestamp */
791 if (port->rx_ts_enabled)
792 am65_cpsw_nuss_rx_ts(skb, psdata);
793 csum_info = psdata[2];
794 dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
796 dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
798 k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
800 new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
801 if (new_skb) {
802 skb_put(skb, pkt_len);
803 skb->protocol = eth_type_trans(skb, ndev);
804 am65_cpsw_nuss_rx_csum(skb, csum_info);
805 napi_gro_receive(&common->napi_rx, skb);
807 ndev_priv = netdev_priv(ndev);
808 stats = this_cpu_ptr(ndev_priv->stats);
810 u64_stats_update_begin(&stats->syncp);
811 stats->rx_packets++;
812 stats->rx_bytes += pkt_len;
813 u64_stats_update_end(&stats->syncp);
814 kmemleak_not_leak(new_skb);
815 } else {
816 ndev->stats.rx_dropped++;
817 new_skb = skb;
820 if (netif_dormant(ndev)) {
821 dev_kfree_skb_any(new_skb);
822 ndev->stats.rx_dropped++;
823 return 0;
826 ret = am65_cpsw_nuss_rx_push(common, new_skb);
827 if (WARN_ON(ret < 0)) {
828 dev_kfree_skb_any(new_skb);
829 ndev->stats.rx_errors++;
830 ndev->stats.rx_dropped++;
833 return ret;
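/* RX NAPI poll: walk the flows from the last one down to flow 0, consuming
 * packets up to the remaining budget per flow; once fewer than budget packets
 * have been processed overall, complete NAPI and re-enable the RX interrupt.
 */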
836 static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
838 struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
839 int flow = AM65_CPSW_MAX_RX_FLOWS;
840 int cur_budget, ret;
841 int num_rx = 0;
843 /* process every flow */
844 while (flow--) {
845 cur_budget = budget - num_rx;
847 while (cur_budget--) {
848 ret = am65_cpsw_nuss_rx_packets(common, flow);
849 if (ret)
850 break;
851 num_rx++;
854 if (num_rx >= budget)
855 break;
858 dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
860 if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
861 enable_irq(common->rx_chns.irq);
863 return num_rx;
866 static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
867 struct device *dev,
868 struct cppi5_host_desc_t *desc)
870 struct cppi5_host_desc_t *first_desc, *next_desc;
871 dma_addr_t buf_dma, next_desc_dma;
872 u32 buf_dma_len;
874 first_desc = desc;
875 next_desc = first_desc;
877 cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
879 dma_unmap_single(dev, buf_dma, buf_dma_len,
880 DMA_TO_DEVICE);
882 next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
883 while (next_desc_dma) {
884 next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
885 next_desc_dma);
886 cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
888 dma_unmap_page(dev, buf_dma, buf_dma_len,
889 DMA_TO_DEVICE);
891 next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
893 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
896 k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
899 static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
901 struct am65_cpsw_tx_chn *tx_chn = data;
902 struct cppi5_host_desc_t *desc_tx;
903 struct sk_buff *skb;
904 void **swdata;
906 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
907 swdata = cppi5_hdesc_get_swdata(desc_tx);
908 skb = *(swdata);
909 am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
911 dev_kfree_skb_any(skb);
914 static struct sk_buff *
915 am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
916 dma_addr_t desc_dma)
918 struct am65_cpsw_ndev_priv *ndev_priv;
919 struct am65_cpsw_ndev_stats *stats;
920 struct cppi5_host_desc_t *desc_tx;
921 struct net_device *ndev;
922 struct sk_buff *skb;
923 void **swdata;
925 desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
926 desc_dma);
927 swdata = cppi5_hdesc_get_swdata(desc_tx);
928 skb = *(swdata);
929 am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
931 ndev = skb->dev;
933 am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
935 ndev_priv = netdev_priv(ndev);
936 stats = this_cpu_ptr(ndev_priv->stats);
937 u64_stats_update_begin(&stats->syncp);
938 stats->tx_packets++;
939 stats->tx_bytes += skb->len;
940 u64_stats_update_end(&stats->syncp);
942 return skb;
945 static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
946 struct netdev_queue *netif_txq)
948 if (netif_tx_queue_stopped(netif_txq)) {
949 /* Check whether the queue is stopped due to stalled
950 * TX DMA; if the queue is stopped, wake it up since
951 * free TX descriptors are available again.
952 */
953 __netif_tx_lock(netif_txq, smp_processor_id());
954 if (netif_running(ndev) &&
955 (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
956 netif_tx_wake_queue(netif_txq);
958 __netif_tx_unlock(netif_txq);
962 static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
963 int chn, unsigned int budget)
965 struct device *dev = common->dev;
966 struct am65_cpsw_tx_chn *tx_chn;
967 struct netdev_queue *netif_txq;
968 unsigned int total_bytes = 0;
969 struct net_device *ndev;
970 struct sk_buff *skb;
971 dma_addr_t desc_dma;
972 int res, num_tx = 0;
974 tx_chn = &common->tx_chns[chn];
976 while (true) {
977 spin_lock(&tx_chn->lock);
978 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
979 spin_unlock(&tx_chn->lock);
980 if (res == -ENODATA)
981 break;
983 if (cppi5_desc_is_tdcm(desc_dma)) {
984 if (atomic_dec_and_test(&common->tdown_cnt))
985 complete(&common->tdown_complete);
986 break;
989 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
990 total_bytes = skb->len;
991 ndev = skb->dev;
992 napi_consume_skb(skb, budget);
993 num_tx++;
995 netif_txq = netdev_get_tx_queue(ndev, chn);
997 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
999 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1002 dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1004 return num_tx;
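/* The CPSW2G variant below is functionally the same TX completion loop but
 * skips tx_chn->lock. Reading of the code (not a documented guarantee): in
 * the multi-port configuration several slave netdevs can push to the same TX
 * channel concurrently with completion pops, so push/pop are serialized by
 * the lock, whereas the single external port of CPSW2G does not need it.
 */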
1007 static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
1008 int chn, unsigned int budget)
1010 struct device *dev = common->dev;
1011 struct am65_cpsw_tx_chn *tx_chn;
1012 struct netdev_queue *netif_txq;
1013 unsigned int total_bytes = 0;
1014 struct net_device *ndev;
1015 struct sk_buff *skb;
1016 dma_addr_t desc_dma;
1017 int res, num_tx = 0;
1019 tx_chn = &common->tx_chns[chn];
1021 while (true) {
1022 res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
1023 if (res == -ENODATA)
1024 break;
1026 if (cppi5_desc_is_tdcm(desc_dma)) {
1027 if (atomic_dec_and_test(&common->tdown_cnt))
1028 complete(&common->tdown_complete);
1029 break;
1032 skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
1034 ndev = skb->dev;
1035 total_bytes += skb->len;
1036 napi_consume_skb(skb, budget);
1037 num_tx++;
1040 if (!num_tx)
1041 return 0;
1043 netif_txq = netdev_get_tx_queue(ndev, chn);
1045 netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
1047 am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
1049 dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
1051 return num_tx;
1054 static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
1056 struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
1057 int num_tx;
1059 if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
1060 num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
1061 else
1062 num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
1064 num_tx = min(num_tx, budget);
1065 if (num_tx < budget) {
1066 napi_complete(napi_tx);
1067 enable_irq(tx_chn->irq);
1070 return num_tx;
1073 static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
1075 struct am65_cpsw_common *common = dev_id;
1077 disable_irq_nosync(irq);
1078 napi_schedule(&common->napi_rx);
1080 return IRQ_HANDLED;
1083 static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
1085 struct am65_cpsw_tx_chn *tx_chn = dev_id;
1087 disable_irq_nosync(irq);
1088 napi_schedule(&tx_chn->napi_tx);
1090 return IRQ_HANDLED;
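/* TX submit path: DMA-map the linear part of the skb, chain any page
 * fragments through linked host buffer descriptors, report the packet to BQL
 * before pushing, and stop the queue when fewer than MAX_SKB_FRAGS
 * descriptors remain in the pool. For CHECKSUM_PARTIAL skbs the checksum
 * request is encoded into psdata[2] as
 * ((cs_offset + 1) << 24) | ((cs_start + 1) << 16) | (skb->len - cs_start),
 * where cs_start is the transport header offset and cs_offset adds
 * skb->csum_offset (hardware counts bytes from 1). Illustrative example: a
 * plain IPv4/TCP frame has cs_start = 34 and csum_offset = 16, so
 * psdata[2] = (51 << 24) | (35 << 16) | (skb->len - 34).
 */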
1093 static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
1094 struct net_device *ndev)
1096 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1097 struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
1098 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1099 struct device *dev = common->dev;
1100 struct am65_cpsw_tx_chn *tx_chn;
1101 struct netdev_queue *netif_txq;
1102 dma_addr_t desc_dma, buf_dma;
1103 int ret, q_idx, i;
1104 void **swdata;
1105 u32 *psdata;
1106 u32 pkt_len;
1108 /* padding enabled in hw */
1109 pkt_len = skb_headlen(skb);
1111 /* SKB TX timestamp */
1112 if (port->tx_ts_enabled)
1113 am65_cpts_prep_tx_timestamp(common->cpts, skb);
1115 q_idx = skb_get_queue_mapping(skb);
1116 dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
1118 tx_chn = &common->tx_chns[q_idx];
1119 netif_txq = netdev_get_tx_queue(ndev, q_idx);
1121 /* Map the linear buffer */
1122 buf_dma = dma_map_single(dev, skb->data, pkt_len,
1123 DMA_TO_DEVICE);
1124 if (unlikely(dma_mapping_error(dev, buf_dma))) {
1125 dev_err(dev, "Failed to map tx skb buffer\n");
1126 ndev->stats.tx_errors++;
1127 goto err_free_skb;
1130 first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1131 if (!first_desc) {
1132 dev_dbg(dev, "Failed to allocate descriptor\n");
1133 dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
1134 goto busy_stop_q;
1137 cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
1138 AM65_CPSW_NAV_PS_DATA_SIZE);
1139 cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
1140 cppi5_hdesc_set_pkttype(first_desc, 0x7);
1141 cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
1143 cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
1144 swdata = cppi5_hdesc_get_swdata(first_desc);
1145 *(swdata) = skb;
1146 psdata = cppi5_hdesc_get_psdata(first_desc);
1148 /* HW csum offload if enabled */
1149 psdata[2] = 0;
1150 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1151 unsigned int cs_start, cs_offset;
1153 cs_start = skb_transport_offset(skb);
1154 cs_offset = cs_start + skb->csum_offset;
1155 /* HW numbers bytes starting from 1 */
1156 psdata[2] = ((cs_offset + 1) << 24) |
1157 ((cs_start + 1) << 16) | (skb->len - cs_start);
1158 dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
1161 if (!skb_is_nonlinear(skb))
1162 goto done_tx;
1164 dev_dbg(dev, "fragmented SKB\n");
1166 /* Handle the case where skb is fragmented in pages */
1167 cur_desc = first_desc;
1168 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1169 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1170 u32 frag_size = skb_frag_size(frag);
1172 next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
1173 if (!next_desc) {
1174 dev_err(dev, "Failed to allocate descriptor\n");
1175 goto busy_free_descs;
1178 buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
1179 DMA_TO_DEVICE);
1180 if (unlikely(dma_mapping_error(dev, buf_dma))) {
1181 dev_err(dev, "Failed to map tx skb page\n");
1182 k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
1183 ndev->stats.tx_errors++;
1184 goto err_free_descs;
1187 cppi5_hdesc_reset_hbdesc(next_desc);
1188 cppi5_hdesc_attach_buf(next_desc,
1189 buf_dma, frag_size, buf_dma, frag_size);
1191 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
1192 next_desc);
1193 cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
1195 pkt_len += frag_size;
1196 cur_desc = next_desc;
1198 WARN_ON(pkt_len != skb->len);
1200 done_tx:
1201 skb_tx_timestamp(skb);
1203 /* report bql before sending packet */
1204 netdev_tx_sent_queue(netif_txq, pkt_len);
1206 cppi5_hdesc_set_pktlen(first_desc, pkt_len);
1207 desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
1208 if (AM65_CPSW_IS_CPSW2G(common)) {
1209 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1210 } else {
1211 spin_lock_bh(&tx_chn->lock);
1212 ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
1213 spin_unlock_bh(&tx_chn->lock);
1215 if (ret) {
1216 dev_err(dev, "can't push desc %d\n", ret);
1217 /* inform bql */
1218 netdev_tx_completed_queue(netif_txq, 1, pkt_len);
1219 ndev->stats.tx_errors++;
1220 goto err_free_descs;
1223 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
1224 netif_tx_stop_queue(netif_txq);
1225 /* Barrier, so that stop_queue visible to other cpus */
1226 smp_mb__after_atomic();
1227 dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
1229 /* re-check for smp */
1230 if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
1231 MAX_SKB_FRAGS) {
1232 netif_tx_wake_queue(netif_txq);
1233 dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
1237 return NETDEV_TX_OK;
1239 err_free_descs:
1240 am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
1241 err_free_skb:
1242 ndev->stats.tx_dropped++;
1243 dev_kfree_skb_any(skb);
1244 return NETDEV_TX_OK;
1246 busy_free_descs:
1247 am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
1248 busy_stop_q:
1249 netif_tx_stop_queue(netif_txq);
1250 return NETDEV_TX_BUSY;
1253 static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
1254 void *addr)
1256 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1257 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1258 struct sockaddr *sockaddr = (struct sockaddr *)addr;
1259 int ret;
1261 ret = eth_prepare_mac_addr_change(ndev, addr);
1262 if (ret < 0)
1263 return ret;
1265 ret = pm_runtime_get_sync(common->dev);
1266 if (ret < 0) {
1267 pm_runtime_put_noidle(common->dev);
1268 return ret;
1271 cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
1272 HOST_PORT_NUM, 0, 0);
1273 cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
1274 HOST_PORT_NUM, ALE_SECURE, 0);
1276 am65_cpsw_port_set_sl_mac(port, addr);
1277 eth_commit_mac_addr_change(ndev, sockaddr);
1279 pm_runtime_put(common->dev);
1281 return 0;
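/* Hardware timestamping configuration via the standard SIOCSHWTSTAMP /
 * SIOCGHWTSTAMP ioctls. Illustrative userspace sketch (not part of this
 * driver; assumes sock_fd is an open socket and needs <net/if.h>,
 * <sys/ioctl.h> and <linux/net_tstamp.h>):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note the driver widens any enabled RX filter to HWTSTAMP_FILTER_ALL and
 * reports that back, so userspace should re-read cfg after the ioctl.
 */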
1284 static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
1285 struct ifreq *ifr)
1287 struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
1288 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1289 u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
1290 struct hwtstamp_config cfg;
1292 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1293 return -EOPNOTSUPP;
1295 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1296 return -EFAULT;
1298 /* TX HW timestamp */
1299 switch (cfg.tx_type) {
1300 case HWTSTAMP_TX_OFF:
1301 case HWTSTAMP_TX_ON:
1302 break;
1303 default:
1304 return -ERANGE;
1307 switch (cfg.rx_filter) {
1308 case HWTSTAMP_FILTER_NONE:
1309 port->rx_ts_enabled = false;
1310 break;
1311 case HWTSTAMP_FILTER_ALL:
1312 case HWTSTAMP_FILTER_SOME:
1313 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1314 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1315 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1316 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1317 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1318 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1319 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1320 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1321 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1322 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1323 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1324 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1325 case HWTSTAMP_FILTER_NTP_ALL:
1326 port->rx_ts_enabled = true;
1327 cfg.rx_filter = HWTSTAMP_FILTER_ALL;
1328 break;
1329 default:
1330 return -ERANGE;
1333 port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
1335 /* cfg TX timestamp */
1336 seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
1337 AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
1339 ts_vlan_ltype = ETH_P_8021Q;
1341 ts_ctrl_ltype2 = ETH_P_1588 |
1342 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
1343 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
1344 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
1345 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
1346 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
1347 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
1348 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
1349 AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
1351 ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
1352 AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
1354 if (port->tx_ts_enabled)
1355 ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
1356 AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
1358 writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
1359 writel(ts_vlan_ltype, port->port_base +
1360 AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
1361 writel(ts_ctrl_ltype2, port->port_base +
1362 AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
1363 writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
1365 /* en/dis RX timestamp */
1366 am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
1368 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1371 static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
1372 struct ifreq *ifr)
1374 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1375 struct hwtstamp_config cfg;
1377 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1378 return -EOPNOTSUPP;
1380 cfg.flags = 0;
1381 cfg.tx_type = port->tx_ts_enabled ?
1382 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1383 cfg.rx_filter = port->rx_ts_enabled ?
1384 HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1386 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
1389 static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
1390 struct ifreq *req, int cmd)
1392 struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
1394 if (!netif_running(ndev))
1395 return -EINVAL;
1397 switch (cmd) {
1398 case SIOCSHWTSTAMP:
1399 return am65_cpsw_nuss_hwtstamp_set(ndev, req);
1400 case SIOCGHWTSTAMP:
1401 return am65_cpsw_nuss_hwtstamp_get(ndev, req);
1404 if (!port->slave.phy)
1405 return -EOPNOTSUPP;
1407 return phy_mii_ioctl(port->slave.phy, req, cmd);
1410 static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
1411 struct rtnl_link_stats64 *stats)
1413 struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
1414 unsigned int start;
1415 int cpu;
1417 for_each_possible_cpu(cpu) {
1418 struct am65_cpsw_ndev_stats *cpu_stats;
1419 u64 rx_packets;
1420 u64 rx_bytes;
1421 u64 tx_packets;
1422 u64 tx_bytes;
1424 cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
1425 do {
1426 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1427 rx_packets = cpu_stats->rx_packets;
1428 rx_bytes = cpu_stats->rx_bytes;
1429 tx_packets = cpu_stats->tx_packets;
1430 tx_bytes = cpu_stats->tx_bytes;
1431 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1433 stats->rx_packets += rx_packets;
1434 stats->rx_bytes += rx_bytes;
1435 stats->tx_packets += tx_packets;
1436 stats->tx_bytes += tx_bytes;
1439 stats->rx_errors = dev->stats.rx_errors;
1440 stats->rx_dropped = dev->stats.rx_dropped;
1441 stats->tx_dropped = dev->stats.tx_dropped;
1444 static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
1445 .ndo_open = am65_cpsw_nuss_ndo_slave_open,
1446 .ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
1447 .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
1448 .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode,
1449 .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats,
1450 .ndo_validate_addr = eth_validate_addr,
1451 .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address,
1452 .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
1453 .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
1454 .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
1455 .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
1456 .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
1459 static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
1461 struct am65_cpsw_common *common = port->common;
1463 if (!port->disabled)
1464 return;
1466 cpsw_ale_control_set(common->ale, port->port_id,
1467 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1469 cpsw_sl_reset(port->slave.mac_sl, 100);
1470 cpsw_sl_ctl_reset(port->slave.mac_sl);
1473 static void am65_cpsw_nuss_free_tx_chns(void *data)
1475 struct am65_cpsw_common *common = data;
1476 int i;
1478 for (i = 0; i < common->tx_ch_num; i++) {
1479 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1481 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1482 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1484 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1485 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1487 memset(tx_chn, 0, sizeof(*tx_chn));
1491 void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
1493 struct device *dev = common->dev;
1494 int i;
1496 devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
1498 for (i = 0; i < common->tx_ch_num; i++) {
1499 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1501 if (tx_chn->irq)
1502 devm_free_irq(dev, tx_chn->irq, tx_chn);
1504 netif_napi_del(&tx_chn->napi_tx);
1506 if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
1507 k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
1509 if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
1510 k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
1512 memset(tx_chn, 0, sizeof(*tx_chn));
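/* TX channel setup: one k3-udma-glue TX channel and one descriptor pool per
 * queue, with ring sizes of AM65_CPSW_MAX_TX_DESC aligned up to
 * MAX_SKB_FRAGS. Channels are requested by the names "tx0".."txN"; the name
 * is later reformatted to "<device>-tx<n>" and reused as the IRQ name. A
 * devm action is registered so the channels are released on failure/remove.
 */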
1516 static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
1518 u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
1519 struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
1520 struct device *dev = common->dev;
1521 struct k3_ring_cfg ring_cfg = {
1522 .elm_size = K3_RINGACC_RING_ELSIZE_8,
1523 .mode = K3_RINGACC_RING_MODE_RING,
1524 .flags = 0
1526 u32 hdesc_size;
1527 int i, ret = 0;
1529 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
1530 AM65_CPSW_NAV_SW_DATA_SIZE);
1532 tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
1533 tx_cfg.tx_cfg = ring_cfg;
1534 tx_cfg.txcq_cfg = ring_cfg;
1535 tx_cfg.tx_cfg.size = max_desc_num;
1536 tx_cfg.txcq_cfg.size = max_desc_num;
1538 for (i = 0; i < common->tx_ch_num; i++) {
1539 struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
1541 snprintf(tx_chn->tx_chn_name,
1542 sizeof(tx_chn->tx_chn_name), "tx%d", i);
1544 spin_lock_init(&tx_chn->lock);
1545 tx_chn->common = common;
1546 tx_chn->id = i;
1547 tx_chn->descs_num = max_desc_num;
1548 tx_chn->desc_pool =
1549 k3_cppi_desc_pool_create_name(dev,
1550 tx_chn->descs_num,
1551 hdesc_size,
1552 tx_chn->tx_chn_name);
1553 if (IS_ERR(tx_chn->desc_pool)) {
1554 ret = PTR_ERR(tx_chn->desc_pool);
1555 dev_err(dev, "Failed to create poll %d\n", ret);
1556 goto err;
1559 tx_chn->tx_chn =
1560 k3_udma_glue_request_tx_chn(dev,
1561 tx_chn->tx_chn_name,
1562 &tx_cfg);
1563 if (IS_ERR(tx_chn->tx_chn)) {
1564 ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
1565 "Failed to request tx dma channel\n");
1566 goto err;
1569 tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
1570 if (tx_chn->irq <= 0) {
1571 dev_err(dev, "Failed to get tx dma irq %d\n",
1572 tx_chn->irq);
1573 goto err;
1576 snprintf(tx_chn->tx_chn_name,
1577 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
1578 dev_name(dev), tx_chn->id);
1581 err:
1582 i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
1583 if (i) {
1584 dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
1585 return i;
1588 return ret;
1591 static void am65_cpsw_nuss_free_rx_chns(void *data)
1593 struct am65_cpsw_common *common = data;
1594 struct am65_cpsw_rx_chn *rx_chn;
1596 rx_chn = &common->rx_chns;
1598 if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
1599 k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
1601 if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
1602 k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
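/* RX channel setup: a single k3-udma-glue RX channel named "rx" carrying
 * AM65_CPSW_MAX_RX_FLOWS flows. The effective flow-id base is read back from
 * the glue layer after the channel is requested. Flow 0's free-descriptor
 * queue ring is created first and its ring id is then shared
 * (K3_RINGACC_RING_SHARED) by all remaining flows; an IRQ is obtained per
 * flow via k3_udma_glue_rx_get_irq().
 */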
1605 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
1607 struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
1608 struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
1609 u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
1610 struct device *dev = common->dev;
1611 u32 hdesc_size;
1612 u32 fdqring_id;
1613 int i, ret = 0;
1615 hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
1616 AM65_CPSW_NAV_SW_DATA_SIZE);
1618 rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
1619 rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
1620 rx_cfg.flow_id_base = common->rx_flow_id_base;
1622 /* init all flows */
1623 rx_chn->dev = dev;
1624 rx_chn->descs_num = max_desc_num;
1625 rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
1626 rx_chn->descs_num,
1627 hdesc_size, "rx");
1628 if (IS_ERR(rx_chn->desc_pool)) {
1629 ret = PTR_ERR(rx_chn->desc_pool);
1630 dev_err(dev, "Failed to create rx poll %d\n", ret);
1631 goto err;
1634 rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
1635 if (IS_ERR(rx_chn->rx_chn)) {
1636 ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
1637 "Failed to request rx dma channel\n");
1638 goto err;
1641 common->rx_flow_id_base =
1642 k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
1643 dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
1645 fdqring_id = K3_RINGACC_RING_ID_ANY;
1646 for (i = 0; i < rx_cfg.flow_id_num; i++) {
1647 struct k3_ring_cfg rxring_cfg = {
1648 .elm_size = K3_RINGACC_RING_ELSIZE_8,
1649 .mode = K3_RINGACC_RING_MODE_RING,
1650 .flags = 0,
1652 struct k3_ring_cfg fdqring_cfg = {
1653 .elm_size = K3_RINGACC_RING_ELSIZE_8,
1654 .flags = K3_RINGACC_RING_SHARED,
1656 struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
1657 .rx_cfg = rxring_cfg,
1658 .rxfdq_cfg = fdqring_cfg,
1659 .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
1660 .src_tag_lo_sel =
1661 K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
1664 rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
1665 rx_flow_cfg.rx_cfg.size = max_desc_num;
1666 rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
1667 rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
1669 ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
1670 i, &rx_flow_cfg);
1671 if (ret) {
1672 dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
1673 goto err;
1675 if (!i)
1676 fdqring_id =
1677 k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
1680 rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
1682 if (rx_chn->irq <= 0) {
1683 dev_err(dev, "Failed to get rx dma irq %d\n",
1684 rx_chn->irq);
1685 ret = -ENXIO;
1686 goto err;
1690 err:
1691 i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
1692 if (i) {
1693 dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
1694 return i;
1697 return ret;
1700 static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
1702 struct am65_cpsw_host *host_p = am65_common_get_host(common);
1704 host_p->common = common;
1705 host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
1706 host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
1708 return 0;
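/* Efuse MAC address lookup: the "ti,syscon-efuse" property is a phandle plus
 * register offset into a syscon region holding two 32-bit words. The word at
 * the offset carries MAC bytes 2-5 (byte 2 in bits 31:24) and the word at
 * offset + 4 carries bytes 0-1 in its low 16 bits (byte 0 in bits 15:8).
 * Illustrative property only (phandle and offset are placeholders):
 *	ti,syscon-efuse = <&mcu_conf 0x200>;
 */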
1711 static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
1712 int slave, u8 *mac_addr)
1714 u32 mac_lo, mac_hi, offset;
1715 struct regmap *syscon;
1716 int ret;
1718 syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
1719 if (IS_ERR(syscon)) {
1720 if (PTR_ERR(syscon) == -ENODEV)
1721 return 0;
1722 return PTR_ERR(syscon);
1725 ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
1726 &offset);
1727 if (ret)
1728 return ret;
1730 regmap_read(syscon, offset, &mac_lo);
1731 regmap_read(syscon, offset + 4, &mac_hi);
1733 mac_addr[0] = (mac_hi >> 8) & 0xff;
1734 mac_addr[1] = mac_hi & 0xff;
1735 mac_addr[2] = (mac_lo >> 24) & 0xff;
1736 mac_addr[3] = (mac_lo >> 16) & 0xff;
1737 mac_addr[4] = (mac_lo >> 8) & 0xff;
1738 mac_addr[5] = mac_lo & 0xff;
1740 return 0;
1743 static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
1745 struct device *dev = common->dev;
1746 struct device_node *node;
1747 struct am65_cpts *cpts;
1748 void __iomem *reg_base;
1750 if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
1751 return 0;
1753 node = of_get_child_by_name(dev->of_node, "cpts");
1754 if (!node) {
1755 dev_err(dev, "%s cpts not found\n", __func__);
1756 return -ENOENT;
1759 reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
1760 cpts = am65_cpts_create(dev, reg_base, node);
1761 if (IS_ERR(cpts)) {
1762 int ret = PTR_ERR(cpts);
1764 if (ret == -EOPNOTSUPP) {
1765 dev_info(dev, "cpts disabled\n");
1766 return 0;
1769 dev_err(dev, "cpts create err %d\n", ret);
1770 return ret;
1772 common->cpts = cpts;
1773 /* Forbid PM runtime if CPTS is running.
1774 * K3 CPSWxG modules may completely lose context during ON->OFF
1775 * transitions depending on integration.
1776 * AM65x/J721E MCU CPSW2G: false
1777 * J721E MAIN_CPSW9G: true
1778 */
1779 pm_runtime_forbid(dev);
1781 return 0;
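/* Slave ports are described under an "ethernet-ports" child of the device
 * node; each child named "port" carries a 1-based "reg" port id, an optional
 * "label", optional "ti,mac-only", and either a "phy-handle"/"phy-mode" pair
 * or a fixed-link subnode. Purely illustrative fragment (phandles and values
 * are placeholders):
 *
 *	ethernet-ports {
 *		port@1 {
 *			reg = <1>;
 *			label = "port1";
 *			phy-mode = "rgmii-rxid";
 *			phy-handle = <&phy0>;
 *			ti,syscon-efuse = <&mcu_conf 0x200>;
 *		};
 *	};
 *
 * The MAC address comes from of_get_mac_address(), the efuse lookup above,
 * or falls back to a random address.
 */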
1784 static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
1786 struct device_node *node, *port_np;
1787 struct device *dev = common->dev;
1788 int ret;
1790 node = of_get_child_by_name(dev->of_node, "ethernet-ports");
1791 if (!node)
1792 return -ENOENT;
1794 for_each_child_of_node(node, port_np) {
1795 struct am65_cpsw_port *port;
1796 const void *mac_addr;
1797 u32 port_id;
1799 /* it is not a slave port node, continue */
1800 if (strcmp(port_np->name, "port"))
1801 continue;
1803 ret = of_property_read_u32(port_np, "reg", &port_id);
1804 if (ret < 0) {
1805 dev_err(dev, "%pOF error reading port_id %d\n",
1806 port_np, ret);
1807 return ret;
1810 if (!port_id || port_id > common->port_num) {
1811 dev_err(dev, "%pOF has invalid port_id %u %s\n",
1812 port_np, port_id, port_np->name);
1813 return -EINVAL;
1816 port = am65_common_get_port(common, port_id);
1817 port->port_id = port_id;
1818 port->common = common;
1819 port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
1820 AM65_CPSW_NU_PORTS_OFFSET * (port_id);
1821 port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
1822 (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
1823 port->name = of_get_property(port_np, "label", NULL);
1824 port->fetch_ram_base =
1825 common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
1826 (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
1828 port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
1829 if (IS_ERR(port->slave.mac_sl))
1830 return PTR_ERR(port->slave.mac_sl);
1832 port->disabled = !of_device_is_available(port_np);
1833 if (port->disabled) {
1834 common->disabled_ports_mask |= BIT(port->port_id);
1835 continue;
1838 port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
1839 if (IS_ERR(port->slave.ifphy)) {
1840 ret = PTR_ERR(port->slave.ifphy);
1841 dev_err(dev, "%pOF error retrieving port phy: %d\n",
1842 port_np, ret);
1843 return ret;
1846 port->slave.mac_only =
1847 of_property_read_bool(port_np, "ti,mac-only");
1849 /* get phy/link info */
1850 if (of_phy_is_fixed_link(port_np)) {
1851 ret = of_phy_register_fixed_link(port_np);
1852 if (ret)
1853 return dev_err_probe(dev, ret,
1854 "failed to register fixed-link phy %pOF\n",
1855 port_np);
1856 port->slave.phy_node = of_node_get(port_np);
1857 } else {
1858 port->slave.phy_node =
1859 of_parse_phandle(port_np, "phy-handle", 0);
1862 if (!port->slave.phy_node) {
1863 dev_err(dev,
1864 "slave[%d] no phy found\n", port_id);
1865 return -ENODEV;
1868 ret = of_get_phy_mode(port_np, &port->slave.phy_if);
1869 if (ret) {
1870 dev_err(dev, "%pOF read phy-mode err %d\n",
1871 port_np, ret);
1872 return ret;
1875 mac_addr = of_get_mac_address(port_np);
1876 if (!IS_ERR(mac_addr)) {
1877 ether_addr_copy(port->slave.mac_addr, mac_addr);
1878 } else if (am65_cpsw_am654_get_efuse_macid(port_np,
1879 port->port_id,
1880 port->slave.mac_addr) ||
1881 !is_valid_ether_addr(port->slave.mac_addr)) {
1882 random_ether_addr(port->slave.mac_addr);
1883 dev_err(dev, "Use random MAC address\n");
1886 of_node_put(node);
1888 /* is there at least one ext.port */
1889 if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
1890 dev_err(dev, "No Ext. port are available\n");
1891 return -ENODEV;
1894 return 0;
1897 static void am65_cpsw_pcpu_stats_free(void *data)
1899 struct am65_cpsw_ndev_stats __percpu *stats = data;
1901 free_percpu(stats);
static int
am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret;

	port = &common->ports[port_idx];

	if (port->disabled)
		return 0;

	/* alloc netdev */
	port->ndev = devm_alloc_etherdev_mqs(common->dev,
					     sizeof(struct am65_cpsw_ndev_priv),
					     AM65_CPSW_MAX_TX_QUEUES,
					     AM65_CPSW_MAX_RX_QUEUES);
	if (!port->ndev) {
		dev_err(dev, "error allocating slave net_device %u\n",
			port->port_id);
		return -ENOMEM;
	}

	ndev_priv = netdev_priv(port->ndev);
	ndev_priv->port = port;
	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
	SET_NETDEV_DEV(port->ndev, dev);

	ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);

	port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
	port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
	port->ndev->hw_features = NETIF_F_SG |
				  NETIF_F_RXCSUM |
				  NETIF_F_HW_CSUM |
				  NETIF_F_HW_TC;
	port->ndev->features = port->ndev->hw_features |
			       NETIF_F_HW_VLAN_CTAG_FILTER;
	port->ndev->vlan_features |= NETIF_F_SG;
	port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
	port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;

	/* Disable TX checksum offload by default due to HW bug */
	if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
		port->ndev->features &= ~NETIF_F_HW_CSUM;

	ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
	if (!ndev_priv->stats)
		return -ENOMEM;

	ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
				       ndev_priv->stats);
	if (ret)
		dev_err(dev, "failed to add percpu stat free action %d\n", ret);

	if (!common->dma_ndev)
		common->dma_ndev = port->ndev;

	return ret;
}

static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
{
	int ret;
	int i;

	for (i = 0; i < common->port_num; i++) {
		ret = am65_cpsw_nuss_init_port_ndev(common, i);
		if (ret)
			return ret;
	}

	netif_napi_add(common->dma_ndev, &common->napi_rx,
		       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);

	return ret;
}
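
/* Register one TX NAPI context and request one IRQ per TX DMA channel; all
 * TX channels poll through the shared common->dma_ndev.
 */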
static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i, ret = 0;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		netif_tx_napi_add(common->dma_ndev, &tx_chn->napi_tx,
				  am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT);

		ret = devm_request_irq(dev, tx_chn->irq,
				       am65_cpsw_nuss_tx_irq,
				       IRQF_TRIGGER_HIGH,
				       tx_chn->tx_chn_name, tx_chn);
		if (ret) {
			dev_err(dev, "failure requesting tx%u irq %u, %d\n",
				tx_chn->id, tx_chn->irq, ret);
			goto err;
		}
	}

err:
	return ret;
}

static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		if (port->ndev)
			unregister_netdev(port->ndev);
	}
}
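
/* Add TX NAPI contexts, request the RX IRQ and register every allocated port
 * net_device; on a registration failure, already-registered net_devices are
 * unregistered again via am65_cpsw_nuss_cleanup_ndev().
 */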
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret = 0, i;

	ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
	if (ret)
		return ret;

	ret = devm_request_irq(dev, common->rx_chns.irq,
			       am65_cpsw_nuss_rx_irq,
			       IRQF_TRIGGER_HIGH, dev_name(dev), common);
	if (ret) {
		dev_err(dev, "failure requesting rx irq %u, %d\n",
			common->rx_chns.irq, ret);
		return ret;
	}

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];

		if (!port->ndev)
			continue;

		ret = register_netdev(port->ndev);
		if (ret) {
			dev_err(dev, "error registering slave net device %i: %d\n",
				i, ret);
			goto err_cleanup_ndev;
		}
	}

	/* can't auto unregister ndev using devm_add_action() due to
	 * devres release sequence in DD core for DMA
	 */
	return 0;

err_cleanup_ndev:
	am65_cpsw_nuss_cleanup_ndev(common);
	return ret;
}
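
/* Switch to a new number of TX channels: re-initialize the TX DMA channels
 * and re-add their NAPI contexts and IRQs (used when the channel count is
 * reconfigured, e.g. via ethtool).
 */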
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
{
	int ret;

	common->tx_ch_num = num_tx;
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		return ret;

	return am65_cpsw_nuss_ndev_add_tx_napi(common);
}
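
/* Per-SoC-revision data: quirks_dis lists quirks that do not apply on that
 * silicon revision. On AM65x SR2.0 the i2027 TX checksum quirk is cleared,
 * so TX checksum offload stays enabled there.
 */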
struct am65_cpsw_soc_pdata {
	u32	quirks_dis;
};

static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
	.quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
};

static const struct soc_device_attribute am65_cpsw_socinfo[] = {
	{ .family = "AM65X",
	  .revision = "SR2.0",
	  .data = &am65x_soc_sr2_0
	},
	{/* sentinel */}
};

static const struct am65_cpsw_pdata am65x_sr1_0 = {
	.quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
	.ale_dev_id = "am65x-cpsw2g",
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};

static const struct am65_cpsw_pdata j721e_pdata = {
	.quirks = 0,
	.ale_dev_id = "am65x-cpsw2g",
	.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};

static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
	{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
	{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);

static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
{
	const struct soc_device_attribute *soc;

	soc = soc_device_match(am65_cpsw_socinfo);
	if (soc && soc->data) {
		const struct am65_cpsw_soc_pdata *socdata = soc->data;

		/* disable quirks */
		common->pdata.quirks &= ~socdata->quirks_dis;
	}
}
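
/* Probe: map the "cpsw_nuss" register region, count the children of the
 * "ethernet-ports" node (1..AM65_CPSW_MAX_PORTS), optionally create a
 * platform device for an enabled "mdio" child node, take the "fck" clock,
 * then bring up the DMA channels, host port, slave ports, ALE and CPTS
 * before registering the per-port net_devices.
 */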
static int am65_cpsw_nuss_probe(struct platform_device *pdev)
{
	struct cpsw_ale_params ale_params = { 0 };
	const struct of_device_id *of_id;
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	struct device_node *node;
	struct resource *res;
	struct clk *clk;
	int ret, i;

	common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
	if (!common)
		return -ENOMEM;
	common->dev = dev;

	of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
	if (!of_id)
		return -EINVAL;
	common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;

	am65_cpsw_nuss_apply_socinfo(common);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
	common->ss_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(common->ss_base))
		return PTR_ERR(common->ss_base);
	common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;
	common->port_num = of_get_child_count(node);
	if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
		return -ENOENT;
	of_node_put(node);

	common->rx_flow_id_base = -1;
	init_completion(&common->tdown_complete);
	common->tx_ch_num = 1;
	common->pf_p0_rx_ptype_rrobin = false;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "error setting dma mask: %d\n", ret);
		return ret;
	}

	common->ports = devm_kcalloc(dev, common->port_num,
				     sizeof(*common->ports),
				     GFP_KERNEL);
	if (!common->ports)
		return -ENOMEM;

	clk = devm_clk_get(dev, "fck");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
	common->bus_freq = clk_get_rate(clk);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		return ret;
	}

	node = of_get_child_by_name(dev->of_node, "mdio");
	if (!node) {
		dev_warn(dev, "MDIO node not found\n");
	} else if (of_device_is_available(node)) {
		struct platform_device *mdio_pdev;

		mdio_pdev = of_platform_device_create(node, NULL, dev);
		if (!mdio_pdev) {
			ret = -ENODEV;
			goto err_pm_clear;
		}

		common->mdio_dev = &mdio_pdev->dev;
	}
	of_node_put(node);

	am65_cpsw_nuss_get_ver(common);

	/* init tx channels */
	ret = am65_cpsw_nuss_init_tx_chns(common);
	if (ret)
		goto err_of_clear;
	ret = am65_cpsw_nuss_init_rx_chns(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_host_p(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_init_slave_ports(common);
	if (ret)
		goto err_of_clear;

	/* init common data */
	ale_params.dev = dev;
	ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
	ale_params.ale_ports = common->port_num + 1;
	ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
	ale_params.dev_id = common->pdata.ale_dev_id;
	ale_params.bus_freq = common->bus_freq;

	common->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(common->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		ret = PTR_ERR(common->ale);
		goto err_of_clear;
	}

	ret = am65_cpsw_init_cpts(common);
	if (ret)
		goto err_of_clear;

	/* init ports */
	for (i = 0; i < common->port_num; i++)
		am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);

	dev_set_drvdata(dev, common);

	ret = am65_cpsw_nuss_init_ndevs(common);
	if (ret)
		goto err_of_clear;

	ret = am65_cpsw_nuss_register_ndevs(common);
	if (ret)
		goto err_of_clear;

	pm_runtime_put(dev);
	return 0;

err_of_clear:
	of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
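
/* Tear down in reverse probe order; the net_devices must be unregistered
 * here rather than through devres (see the comment in the function body).
 */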
static int am65_cpsw_nuss_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct am65_cpsw_common *common;
	int ret;

	common = dev_get_drvdata(dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

	/* must unregister ndevs here because DD release_driver routine calls
	 * dma_deconfigure(dev) before devres_release_all(dev)
	 */
	am65_cpsw_nuss_cleanup_ndev(common);

	of_platform_device_destroy(common->mdio_dev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static struct platform_driver am65_cpsw_nuss_driver = {
	.driver = {
		.name	 = AM65_CPSW_DRV_NAME,
		.of_match_table = am65_cpsw_nuss_of_mtable,
	},
	.probe = am65_cpsw_nuss_probe,
	.remove = am65_cpsw_nuss_remove,
};

module_platform_driver(am65_cpsw_nuss_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");