// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments ICSSG Ethernet Driver
 *
 * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) Siemens AG, 2024
 *
 */
#include <linux/dma-mapping.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>

#include "icssg_prueth.h"
#include "../k3-cppi-desc-pool.h"

/* Netif debug messages possible */
#define PRUETH_EMAC_DEBUG	(NETIF_MSG_DRV | \
				 NETIF_MSG_TX_QUEUED | \
				 NETIF_MSG_RX_STATUS)

#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
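
/* Release the RX descriptor pool and the RX DMA channel obtained from the
 * K3 UDMA glue layer. Each resource is checked before it is freed, so this
 * is safe to call on a partially initialized channel.
 */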
void prueth_cleanup_rx_chns(struct prueth_emac *emac,
			    struct prueth_rx_chn *rx_chn,
			    int max_rflows)
{
	if (rx_chn->desc_pool)
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);

	if (rx_chn->rx_chn)
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);

void prueth_cleanup_tx_chns(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->desc_pool)
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		if (tx_chn->tx_chn)
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		/* Assume prueth_cleanup_tx_chns() is called at the
		 * end after all channel resources are freed
		 */
		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);

void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		if (tx_chn->irq)
			free_irq(tx_chn->irq, tx_chn);
		netif_napi_del(&tx_chn->napi_tx);
	}
}
EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
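
/* Unmap and free a (possibly chained) TX host descriptor: the first
 * descriptor maps the linear part of the skb, linked host buffer
 * descriptors map the page fragments.
 */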
void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
		      struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

	dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);

		dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
		k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}
EXPORT_SYMBOL_GPL(prueth_xmit_free);
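
/* Reap completed TX descriptors on a channel, update device stats and BQL
 * accounting, and wake the TX queue again if it was stopped and enough
 * descriptors became available. Returns the number of completed packets.
 */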
int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
			     int budget, bool *tdown)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_tx;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	unsigned int total_bytes = 0;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &emac->tx_chns[chn];

	while (true) {
		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		/* teardown completion */
		if (cppi5_desc_is_tdcm(desc_dma)) {
			if (atomic_dec_and_test(&emac->tdown_cnt))
				complete(&emac->tdown_complete);
			*tdown = true;
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);

		/* was this command's TX complete? */
		if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
			prueth_xmit_free(tx_chn, desc_tx);
			continue;
		}

		skb = *(swdata);
		prueth_xmit_free(tx_chn, desc_tx);

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);
	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the TX queue was stopped, wake it now
		 * if we have enough room.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);
		__netif_tx_unlock(netif_txq);
	}

	return num_tx;
}
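
/* TX interrupt-pacing timer: fires once the tx_pace_timeout_ns interval has
 * elapsed and re-enables the channel interrupt that the NAPI poll left
 * disabled.
 */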
static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
{
	struct prueth_tx_chn *tx_chns =
			container_of(timer, struct prueth_tx_chn, tx_hrtimer);

	enable_irq(tx_chns->irq);
	return HRTIMER_NORESTART;
}

static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
	struct prueth_emac *emac = tx_chn->emac;
	bool tdown = false;
	int num_tx_packets;

	num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
						  &tdown);

	if (num_tx_packets >= budget)
		return budget;

	if (napi_complete_done(napi_tx, num_tx_packets)) {
		if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
			hrtimer_start(&tx_chn->tx_hrtimer,
				      ns_to_ktime(tx_chn->tx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(tx_chn->irq);
		}
	}

	return num_tx_packets;
}

static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
{
	struct prueth_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}
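
/* Register a TX NAPI context, pacing hrtimer and IRQ handler for each TX
 * channel. On failure the contexts added so far are torn down again.
 */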
int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int i, ret;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
		hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
		tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
		ret = request_irq(tx_chn->irq, prueth_tx_irq,
				  IRQF_TRIGGER_HIGH, tx_chn->name,
				  tx_chn);
		if (ret) {
			netif_napi_del(&tx_chn->napi_tx);
			dev_err(prueth->dev, "unable to request TX IRQ %d\n",
				tx_chn->irq);
			goto fail;
		}
	}

	return 0;
fail:
	prueth_ndev_del_tx_napi(emac, i);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);
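
/* Request the per-channel TX DMA channels, completion rings and CPPI5
 * descriptor pools from the K3 UDMA glue layer.
 */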
int prueth_init_tx_chns(struct prueth_emac *emac)
{
	static const struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
		.size = PRUETH_MAX_TX_DESC,
	};
	struct k3_udma_glue_tx_channel_cfg tx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	int ret, slice, i;
	u32 hdesc_size;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	init_completion(&emac->tdown_complete);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&tx_cfg, 0, sizeof(tx_cfg));
	tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;

	for (i = 0; i < emac->tx_ch_num; i++) {
		struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];

		/* To differentiate channels for SLICE0 vs SLICE1 */
		snprintf(tx_chn->name, sizeof(tx_chn->name),
			 "tx%d-%d", slice, i);

		tx_chn->emac = emac;
		tx_chn->id = i;
		tx_chn->descs_num = PRUETH_MAX_TX_DESC;

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev, tx_chn->name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			tx_chn->tx_chn = NULL;
			netdev_err(ndev,
				   "Failed to request tx dma ch: %d\n", ret);
			goto fail;
		}

		tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			tx_chn->desc_pool = NULL;
			netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
			goto fail;
		}

		ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			goto fail;
		}
		tx_chn->irq = ret;

		snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

	return 0;

fail:
	prueth_cleanup_tx_chns(emac);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
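
/* Request the RX DMA channel, its descriptor pool and the requested number
 * of RX flows (rings plus free-descriptor queues) from the K3 UDMA glue
 * layer.
 */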
int prueth_init_rx_chns(struct prueth_emac *emac,
			struct prueth_rx_chn *rx_chn,
			char *name, u32 max_rflows,
			u32 max_desc_num)
{
	struct k3_udma_glue_rx_channel_cfg rx_cfg;
	struct device *dev = emac->prueth->dev;
	struct net_device *ndev = emac->ndev;
	u32 fdqring_id, hdesc_size;
	int i, ret = 0, slice;
	int flow_id_base;

	slice = prueth_emac_slice(emac);
	if (slice < 0)
		return slice;

	/* To differentiate channels for SLICE0 vs SLICE1 */
	snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);

	hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
					   PRUETH_NAV_SW_DATA_SIZE);
	memset(&rx_cfg, 0, sizeof(rx_cfg));
	rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = max_rflows;
	rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */

	rx_chn->descs_num = max_desc_num;

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
						     &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		rx_chn->rx_chn = NULL;
		netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
		goto fail;
	}

	rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
							  rx_chn->descs_num,
							  hdesc_size,
							  rx_chn->name);
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		rx_chn->desc_pool = NULL;
		netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
		goto fail;
	}

	flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
		emac->rx_mgm_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
	} else {
		emac->rx_flow_id_base = flow_id_base;
		netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
	}

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			netdev_err(ndev, "Failed to init rx flow%d %d\n",
				   i, ret);
			goto fail;
		}
		if (!i)
			fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								     i);
		ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
		if (ret <= 0) {
			if (!ret)
				ret = -ENXIO;
			netdev_err(ndev, "Failed to get rx dma irq");
			goto fail;
		}
		rx_chn->irq[i] = ret;
	}

	return 0;

fail:
	prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
	return ret;
}
EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
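
/* Map one receive buffer skb and queue it on the RX free-descriptor ring so
 * the hardware can fill it.
 */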
int prueth_dma_rx_push(struct prueth_emac *emac,
		       struct sk_buff *skb,
		       struct prueth_rx_chn *rx_chn)
{
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		netdev_err(ndev, "rx push: failed to allocate descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*swdata = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
					desc_rx, desc_dma);
}
EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
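
/* Combine the IEP count fields (low and high words plus rollover count)
 * into a nanosecond timestamp using the given IEP cycle time.
 */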
u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
{
	u32 iepcount_lo, iepcount_hi, hi_rollover_count;
	u64 ns;

	iepcount_lo = lo & GENMASK(19, 0);
	iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
	hi_rollover_count = hi >> 11;

	ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
	ns = ns * cycle_time_ns + iepcount_lo;

	return ns;
}
EXPORT_SYMBOL_GPL(icssg_ts_to_ns);

void emac_rx_timestamp(struct prueth_emac *emac,
		       struct sk_buff *skb, u32 *psdata)
{
	struct skb_shared_hwtstamps *ssh;
	u64 ns;

	if (emac->is_sr1) {
		ns = (u64)psdata[1] << 32 | psdata[0];
	} else {
		u32 hi_sw = readl(emac->prueth->shram.va +
				  TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
		ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
				    IEP_DEFAULT_CYCLE_TIME_NS);
	}

	ssh = skb_hwtstamps(skb);
	memset(ssh, 0, sizeof(*ssh));
	ssh->hwtstamp = ns_to_ktime(ns);
}
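
/* Pop one packet from an RX flow, pass the filled skb up the network stack
 * and queue a freshly allocated buffer in its place.
 */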
static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
{
	struct prueth_rx_chn *rx_chn = &emac->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0;
	struct net_device *ndev = emac->ndev;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	void **swdata;
	u32 *psdata;
	int ret;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			netdev_err(ndev, "rx pop: failed: %d\n", ret);
		return ret;
	}

	if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
		return 0;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	/* RX HW timestamp */
	if (emac->rx_ts_enabled)
		emac_rx_timestamp(emac, skb, psdata);

	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	/* firmware adds 4 CRC bytes, strip them */
	pkt_len -= 4;
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
	/* if allocation fails we drop the packet but push the
	 * descriptor back to the ring with old skb to prevent a stall
	 */
	if (!new_skb) {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	} else {
		/* send the filled skb up the n/w stack */
		skb_put(skb, pkt_len);
		if (emac->prueth->is_switch_mode)
			skb->offload_fwd_mark = emac->offload_fwd_mark;
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&emac->napi_rx, skb);
		ndev->stats.rx_bytes += pkt_len;
		ndev->stats.rx_packets++;
	}

	/* queue another RX DMA */
	ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);

	dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
			 DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
{
	int i;

	/* search and get the next free slot */
	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (!emac->tx_ts_skb[i]) {
			emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
			return i;
		}
	}

	return -EBUSY;
}

/**
 * icssg_ndo_start_xmit - EMAC Transmit function
 * @skb: SKB pointer
 * @ndev: EMAC network adapter
 *
 * Called by the system to transmit a packet - we queue the packet in
 * EMAC hardware transmit queue
 * Doesn't wait for completion we'll check for TX completion in
 * emac_tx_complete_packets().
 *
 * Return: enum netdev_tx
 */
enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct prueth_emac *emac = netdev_priv(ndev);
	struct prueth *prueth = emac->prueth;
	struct netdev_queue *netif_txq;
	struct prueth_tx_chn *tx_chn;
	dma_addr_t desc_dma, buf_dma;
	u32 pkt_len, dst_tag_id;
	int i, ret = 0, q_idx;
	bool in_tx_ts = 0;
	int tx_ts_cookie;
	void **swdata;
	u32 *epib;

	pkt_len = skb_headlen(skb);
	q_idx = skb_get_queue_mapping(skb);

	tx_chn = &emac->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
		netdev_err(ndev, "tx: failed to map skb buffer\n");
		ret = NETDEV_TX_OK;
		goto drop_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
		dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto drop_stop_q_busy;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 PRUETH_NAV_PS_DATA_SIZE);
	cppi5_hdesc_set_pkttype(first_desc, 0);
	epib = first_desc->epib;
	epib[0] = 0;
	epib[1] = 0;
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    emac->tx_ts_enabled) {
		tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
		if (tx_ts_cookie >= 0) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			/* Request TX timestamp */
			epib[0] = (u32)tx_ts_cookie;
			epib[1] = 0x80000000;	/* TX TS request */
			emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
			in_tx_ts = 1;
		}
	}

	/* set dst tag to indicate internal qid at the firmware which is at
	 * bit8..bit15. bit0..bit7 indicates port num for directed
	 * packets in case of switch mode operation and port num 0
	 * for undirected packets in case of HSR offload mode
	 */
	dst_tag_id = emac->port_id | (q_idx << 8);

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_DUP))
		dst_tag_id = PRUETH_UNDIRECTED_PKT_DST_TAG;

	if (prueth->is_hsr_offload_mode &&
	    (ndev->features & NETIF_F_HW_HSR_TAG_INS))
		epib[1] |= PRUETH_UNDIRECTED_PKT_TAG_INS;

	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, dst_tag_id);
	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			netdev_err(ndev,
				   "tx: failed to allocate frag. descriptor\n");
			goto free_desc_stop_q_busy_cleanup_tx_ts;
		}

		buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
			netdev_err(ndev, "tx: Failed to map skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ret = NETDEV_TX_OK;
			goto cleanup_tx_ts;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON_ONCE(pkt_len != skb->len);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	/* cppi5_desc_dump(first_desc, 64); */

	skb_tx_timestamp(skb);  /* SW timestamp if SKBTX_IN_PROGRESS not set */
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		netdev_err(ndev, "tx: push failed: %d\n", ret);
		goto drop_free_descs;
	}

	if (in_tx_ts)
		atomic_inc(&emac->tx_ts_pending);

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue visible to other cpus */
		smp_mb__after_atomic();

		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS)
			netif_tx_wake_queue(netif_txq);
	}

	return NETDEV_TX_OK;

cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}

drop_free_descs:
	prueth_xmit_free(tx_chn, first_desc);

drop_free_skb:
	dev_kfree_skb_any(skb);

	/* error */
	ndev->stats.tx_dropped++;
	netdev_err(ndev, "tx: error: %d\n", ret);

	return ret;

free_desc_stop_q_busy_cleanup_tx_ts:
	if (in_tx_ts) {
		dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
		emac->tx_ts_skb[tx_ts_cookie] = NULL;
	}
	prueth_xmit_free(tx_chn, first_desc);

drop_stop_q_busy:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);

static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct prueth_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	prueth_xmit_free(tx_chn, desc_tx);

	dev_kfree_skb_any(skb);
}

irqreturn_t prueth_rx_irq(int irq, void *dev_id)
{
	struct prueth_emac *emac = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&emac->napi_rx);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(prueth_rx_irq);

void prueth_emac_stop(struct prueth_emac *emac)
{
	struct prueth *prueth = emac->prueth;
	int slice;

	switch (emac->port_id) {
	case PRUETH_PORT_MII0:
		slice = ICSS_SLICE0;
		break;
	case PRUETH_PORT_MII1:
		slice = ICSS_SLICE1;
		break;
	default:
		netdev_err(emac->ndev, "invalid port\n");
		return;
	}

	emac->fw_running = 0;
	if (!emac->is_sr1)
		rproc_shutdown(prueth->txpru[slice]);
	rproc_shutdown(prueth->rtu[slice]);
	rproc_shutdown(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_emac_stop);

void prueth_cleanup_tx_ts(struct prueth_emac *emac)
{
	int i;

	for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
		if (emac->tx_ts_skb[i]) {
			dev_kfree_skb_any(emac->tx_ts_skb[i]);
			emac->tx_ts_skb[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);

int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
	int rx_flow = emac->is_sr1 ?
			PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
	int flow = emac->is_sr1 ?
			PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
	int num_rx = 0;
	int cur_budget;
	int ret;

	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = emac_rx_packet(emac, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
		if (unlikely(emac->rx_pace_timeout_ns)) {
			hrtimer_start(&emac->rx_hrtimer,
				      ns_to_ktime(emac->rx_pace_timeout_ns),
				      HRTIMER_MODE_REL_PINNED);
		} else {
			enable_irq(emac->rx_chns.irq[rx_flow]);
		}
	}

	return num_rx;
}
EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);

int prueth_prepare_rx_chan(struct prueth_emac *emac,
			   struct prueth_rx_chn *chn,
			   int buf_size)
{
	struct sk_buff *skb;
	int i, ret;

	for (i = 0; i < chn->descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
		if (!skb)
			return -ENOMEM;

		ret = prueth_dma_rx_push(emac, skb, chn);
		if (ret < 0) {
			netdev_err(emac->ndev,
				   "cannot submit skb for rx chan %s ret %d\n",
				   chn->name, ret);
			kfree_skb(skb);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);

void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
			  bool free_skb)
{
	int i;

	for (i = 0; i < ch_num; i++) {
		if (free_skb)
			k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
						  &emac->tx_chns[i],
						  prueth_tx_cleanup);
		k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
	}
}
EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);

void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
			  int num_flows, bool disable)
{
	int i;

	for (i = 0; i < num_flows; i++)
		k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
					  prueth_rx_cleanup, !!i);
	if (disable)
		k3_udma_glue_disable_rx_chn(chn->rx_chn);
}
EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);

void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	ndev->stats.tx_errors++;
}
EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);

static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		emac->tx_ts_enabled = 0;
		break;
	case HWTSTAMP_TX_ON:
		emac->tx_ts_enabled = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		emac->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		emac->rx_ts_enabled = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;
	config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return emac_get_ts_config(ndev, ifr);
	case SIOCSHWTSTAMP:
		return emac_set_ts_config(ndev, ifr);
	default:
		break;
	}

	return phy_do_ioctl(ndev, ifr, cmd);
}
EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);

void icssg_ndo_get_stats64(struct net_device *ndev,
			   struct rtnl_link_stats64 *stats)
{
	struct prueth_emac *emac = netdev_priv(ndev);

	emac_update_hardware_stats(emac);

	stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
	stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
	stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
	stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
	stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
	stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
	stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");

	stats->rx_errors = ndev->stats.rx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_errors = ndev->stats.tx_errors;
	stats->tx_dropped = ndev->stats.tx_dropped;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);

int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
				 size_t len)
{
	struct prueth_emac *emac = netdev_priv(ndev);
	int ret;

	ret = snprintf(name, len, "p%d", emac->port_id);
	if (ret >= len)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);

/* get emac_port corresponding to eth_node name */
int prueth_node_port(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return PRUETH_PORT_INVALID;

	if (port_id == 0)
		return PRUETH_PORT_MII0;
	else if (port_id == 1)
		return PRUETH_PORT_MII1;
	else
		return PRUETH_PORT_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_port);

/* get MAC instance corresponding to eth_node name */
int prueth_node_mac(struct device_node *eth_node)
{
	u32 port_id;
	int ret;

	ret = of_property_read_u32(eth_node, "reg", &port_id);
	if (ret)
		return PRUETH_MAC_INVALID;

	if (port_id == 0)
		return PRUETH_MAC0;
	else if (port_id == 1)
		return PRUETH_MAC1;
	else
		return PRUETH_MAC_INVALID;
}
EXPORT_SYMBOL_GPL(prueth_node_mac);

void prueth_netdev_exit(struct prueth *prueth,
			struct device_node *eth_node)
{
	struct prueth_emac *emac;
	enum prueth_mac mac;

	mac = prueth_node_mac(eth_node);
	if (mac == PRUETH_MAC_INVALID)
		return;

	emac = prueth->emac[mac];
	if (!emac)
		return;

	if (of_phy_is_fixed_link(emac->phy_node))
		of_phy_deregister_fixed_link(emac->phy_node);

	netif_napi_del(&emac->napi_rx);

	pruss_release_mem_region(prueth->pruss, &emac->dram);
	destroy_workqueue(emac->cmd_wq);
	free_netdev(emac->ndev);
	prueth->emac[mac] = NULL;
}
EXPORT_SYMBOL_GPL(prueth_netdev_exit);

int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
{
	struct device *dev = prueth->dev;
	enum pruss_pru_id pruss_id;
	struct device_node *np;
	int idx = -1, ret;

	np = dev->of_node;

	switch (slice) {
	case ICSS_SLICE0:
		idx = 0;
		break;
	case ICSS_SLICE1:
		idx = is_sr1 ? 2 : 3;
		break;
	default:
		return -EINVAL;
	}

	prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
	if (IS_ERR(prueth->pru[slice])) {
		ret = PTR_ERR(prueth->pru[slice]);
		prueth->pru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
	}
	prueth->pru_id[slice] = pruss_id;

	idx++;
	prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->rtu[slice])) {
		ret = PTR_ERR(prueth->rtu[slice]);
		prueth->rtu[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
	}

	if (is_sr1)
		return 0;

	idx++;
	prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
	if (IS_ERR(prueth->txpru[slice])) {
		ret = PTR_ERR(prueth->txpru[slice]);
		prueth->txpru[slice] = NULL;
		return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(prueth_get_cores);

void prueth_put_cores(struct prueth *prueth, int slice)
{
	if (prueth->txpru[slice])
		pru_rproc_put(prueth->txpru[slice]);

	if (prueth->rtu[slice])
		pru_rproc_put(prueth->rtu[slice]);

	if (prueth->pru[slice])
		pru_rproc_put(prueth->pru[slice]);
}
EXPORT_SYMBOL_GPL(prueth_put_cores);

#ifdef CONFIG_PM_SLEEP
static int prueth_suspend(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			ret = ndev->netdev_ops->ndo_stop(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to stop: %d", ret);
				return ret;
			}
		}
	}

	return 0;
}

static int prueth_resume(struct device *dev)
{
	struct prueth *prueth = dev_get_drvdata(dev);
	struct net_device *ndev;
	int i, ret;

	for (i = 0; i < PRUETH_NUM_MACS; i++) {
		ndev = prueth->registered_netdevs[i];

		if (!ndev)
			continue;

		if (netif_running(ndev)) {
			ret = ndev->netdev_ops->ndo_open(ndev);
			if (ret < 0) {
				netdev_err(ndev, "failed to start: %d", ret);
				return ret;
			}
			netif_device_attach(ndev);
		}
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

const struct dev_pm_ops prueth_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
};
EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);
1293 MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
1294 MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
1295 MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
1296 MODULE_LICENSE("GPL");