// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);
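
/* dpaa2_ptp is expected to be set by the DPAA2 PTP clock (dprtc) driver once
 * the PTP clock is probed; it is checked below before honoring hardware
 * timestamping requests.
 */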
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}
/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}
127 /* Build a non linear (fragmented) skb based on a S/G table */
128 static struct sk_buff
*dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv
*priv
,
129 struct dpaa2_eth_channel
*ch
,
130 struct dpaa2_sg_entry
*sgt
)
132 struct sk_buff
*skb
= NULL
;
133 struct device
*dev
= priv
->net_dev
->dev
.parent
;
138 struct page
*page
, *head_page
;
142 for (i
= 0; i
< DPAA2_ETH_MAX_SG_ENTRIES
; i
++) {
143 struct dpaa2_sg_entry
*sge
= &sgt
[i
];
145 /* NOTE: We only support SG entries in dpaa2_sg_single format,
146 * but this is the only format we may receive from HW anyway
149 /* Get the address and length from the S/G entry */
150 sg_addr
= dpaa2_sg_get_addr(sge
);
151 sg_vaddr
= dpaa2_iova_to_virt(priv
->iommu_domain
, sg_addr
);
152 dma_unmap_page(dev
, sg_addr
, priv
->rx_buf_size
,
155 sg_length
= dpaa2_sg_get_len(sge
);
158 /* We build the skb around the first data buffer */
159 skb
= build_skb(sg_vaddr
, DPAA2_ETH_RX_BUF_RAW_SIZE
);
160 if (unlikely(!skb
)) {
161 /* Free the first SG entry now, since we already
162 * unmapped it and obtained the virtual address
164 free_pages((unsigned long)sg_vaddr
, 0);
166 /* We still need to subtract the buffers used
167 * by this FD from our software counter
169 while (!dpaa2_sg_is_final(&sgt
[i
]) &&
170 i
< DPAA2_ETH_MAX_SG_ENTRIES
)
175 sg_offset
= dpaa2_sg_get_offset(sge
);
176 skb_reserve(skb
, sg_offset
);
177 skb_put(skb
, sg_length
);
179 /* Rest of the data buffers are stored as skb frags */
180 page
= virt_to_page(sg_vaddr
);
181 head_page
= virt_to_head_page(sg_vaddr
);
183 /* Offset in page (which may be compound).
184 * Data in subsequent SG entries is stored from the
185 * beginning of the buffer, so we don't need to add the
188 page_offset
= ((unsigned long)sg_vaddr
&
190 (page_address(page
) - page_address(head_page
));
192 skb_add_rx_frag(skb
, i
- 1, head_page
, page_offset
,
193 sg_length
, priv
->rx_buf_size
);
196 if (dpaa2_sg_is_final(sge
))
200 WARN_ONCE(i
== DPAA2_ETH_MAX_SG_ENTRIES
, "Final bit not set in SGT");
202 /* Count all data buffers + SG table buffer */
203 ch
->buf_count
-= i
+ 2;
/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}
226 static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv
*priv
,
227 struct dpaa2_eth_channel
*ch
,
233 ch
->xdp
.drop_bufs
[ch
->xdp
.drop_cnt
++] = addr
;
234 if (ch
->xdp
.drop_cnt
< DPAA2_ETH_BUFS_PER_CMD
)
237 while ((err
= dpaa2_io_service_release(ch
->dpio
, priv
->bpid
,
239 ch
->xdp
.drop_cnt
)) == -EBUSY
) {
240 if (retries
++ >= DPAA2_ETH_SWP_BUSY_RETRIES
)
246 dpaa2_eth_free_bufs(priv
, ch
->xdp
.drop_bufs
, ch
->xdp
.drop_cnt
);
247 ch
->buf_count
-= ch
->xdp
.drop_cnt
;
250 ch
->xdp
.drop_cnt
= 0;
253 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv
*priv
,
254 struct dpaa2_eth_fq
*fq
,
255 struct dpaa2_eth_xdp_fds
*xdp_fds
)
257 int total_enqueued
= 0, retries
= 0, enqueued
;
258 struct dpaa2_eth_drv_stats
*percpu_extras
;
259 int num_fds
, err
, max_retries
;
260 struct dpaa2_fd
*fds
;
262 percpu_extras
= this_cpu_ptr(priv
->percpu_extras
);
264 /* try to enqueue all the FDs until the max number of retries is hit */
266 num_fds
= xdp_fds
->num
;
267 max_retries
= num_fds
* DPAA2_ETH_ENQUEUE_RETRIES
;
268 while (total_enqueued
< num_fds
&& retries
< max_retries
) {
269 err
= priv
->enqueue(priv
, fq
, &fds
[total_enqueued
],
270 0, num_fds
- total_enqueued
, &enqueued
);
272 percpu_extras
->tx_portal_busy
+= ++retries
;
275 total_enqueued
+= enqueued
;
279 return total_enqueued
;
282 static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv
*priv
,
283 struct dpaa2_eth_channel
*ch
,
284 struct dpaa2_eth_fq
*fq
)
286 struct rtnl_link_stats64
*percpu_stats
;
287 struct dpaa2_fd
*fds
;
290 percpu_stats
= this_cpu_ptr(priv
->percpu_stats
);
292 // enqueue the array of XDP_TX frames
293 enqueued
= dpaa2_eth_xdp_flush(priv
, fq
, &fq
->xdp_tx_fds
);
295 /* update statistics */
296 percpu_stats
->tx_packets
+= enqueued
;
297 fds
= fq
->xdp_tx_fds
.fds
;
298 for (i
= 0; i
< enqueued
; i
++) {
299 percpu_stats
->tx_bytes
+= dpaa2_fd_get_len(&fds
[i
]);
302 for (i
= enqueued
; i
< fq
->xdp_tx_fds
.num
; i
++) {
303 dpaa2_eth_xdp_release_buf(priv
, ch
, dpaa2_fd_get_addr(&fds
[i
]));
304 percpu_stats
->tx_errors
++;
305 ch
->stats
.xdp_tx_err
++;
307 fq
->xdp_tx_fds
.num
= 0;
310 static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv
*priv
,
311 struct dpaa2_eth_channel
*ch
,
313 void *buf_start
, u16 queue_id
)
315 struct dpaa2_faead
*faead
;
316 struct dpaa2_fd
*dest_fd
;
317 struct dpaa2_eth_fq
*fq
;
320 /* Mark the egress frame hardware annotation area as valid */
321 frc
= dpaa2_fd_get_frc(fd
);
322 dpaa2_fd_set_frc(fd
, frc
| DPAA2_FD_FRC_FAEADV
);
323 dpaa2_fd_set_ctrl(fd
, DPAA2_FD_CTRL_ASAL
);
325 /* Instruct hardware to release the FD buffer directly into
326 * the buffer pool once transmission is completed, instead of
327 * sending a Tx confirmation frame to us
329 ctrl
= DPAA2_FAEAD_A4V
| DPAA2_FAEAD_A2V
| DPAA2_FAEAD_EBDDV
;
330 faead
= dpaa2_get_faead(buf_start
, false);
331 faead
->ctrl
= cpu_to_le32(ctrl
);
332 faead
->conf_fqid
= 0;
334 fq
= &priv
->fq
[queue_id
];
335 dest_fd
= &fq
->xdp_tx_fds
.fds
[fq
->xdp_tx_fds
.num
++];
336 memcpy(dest_fd
, fd
, sizeof(*dest_fd
));
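	/* Batch XDP_TX frames per Tx frame queue; the batch is only flushed
	 * once it reaches DEV_MAP_BULK_SIZE frame descriptors (checked below).
	 */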
338 if (fq
->xdp_tx_fds
.num
< DEV_MAP_BULK_SIZE
)
341 dpaa2_eth_xdp_tx_flush(priv
, ch
, fq
);
344 static u32
dpaa2_eth_run_xdp(struct dpaa2_eth_priv
*priv
,
345 struct dpaa2_eth_channel
*ch
,
346 struct dpaa2_eth_fq
*rx_fq
,
347 struct dpaa2_fd
*fd
, void *vaddr
)
349 dma_addr_t addr
= dpaa2_fd_get_addr(fd
);
350 struct bpf_prog
*xdp_prog
;
352 u32 xdp_act
= XDP_PASS
;
357 xdp_prog
= READ_ONCE(ch
->xdp
.prog
);
361 xdp
.data
= vaddr
+ dpaa2_fd_get_offset(fd
);
362 xdp
.data_end
= xdp
.data
+ dpaa2_fd_get_len(fd
);
363 xdp
.data_hard_start
= xdp
.data
- XDP_PACKET_HEADROOM
;
364 xdp_set_data_meta_invalid(&xdp
);
365 xdp
.rxq
= &ch
->xdp_rxq
;
367 xdp
.frame_sz
= DPAA2_ETH_RX_BUF_RAW_SIZE
-
368 (dpaa2_fd_get_offset(fd
) - XDP_PACKET_HEADROOM
);
370 xdp_act
= bpf_prog_run_xdp(xdp_prog
, &xdp
);
372 /* xdp.data pointer may have changed */
373 dpaa2_fd_set_offset(fd
, xdp
.data
- vaddr
);
374 dpaa2_fd_set_len(fd
, xdp
.data_end
- xdp
.data
);
380 dpaa2_eth_xdp_enqueue(priv
, ch
, fd
, vaddr
, rx_fq
->flowid
);
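		/* XDP_TX: the frame is queued for transmission on the frame
		 * queue selected by the Rx queue's flow id, so Tx stays on the
		 * same queue/core pairing as Rx.
		 */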
383 bpf_warn_invalid_xdp_action(xdp_act
);
386 trace_xdp_exception(priv
->net_dev
, xdp_prog
, xdp_act
);
389 dpaa2_eth_xdp_release_buf(priv
, ch
, addr
);
390 ch
->stats
.xdp_drop
++;
393 dma_unmap_page(priv
->net_dev
->dev
.parent
, addr
,
394 priv
->rx_buf_size
, DMA_BIDIRECTIONAL
);
397 /* Allow redirect use of full headroom */
398 xdp
.data_hard_start
= vaddr
;
399 xdp
.frame_sz
= DPAA2_ETH_RX_BUF_RAW_SIZE
;
401 err
= xdp_do_redirect(priv
->net_dev
, &xdp
, xdp_prog
);
403 ch
->stats
.xdp_drop
++;
405 ch
->stats
.xdp_redirect
++;
409 ch
->xdp
.res
|= xdp_act
;
415 /* Main Rx frame processing routine */
416 static void dpaa2_eth_rx(struct dpaa2_eth_priv
*priv
,
417 struct dpaa2_eth_channel
*ch
,
418 const struct dpaa2_fd
*fd
,
419 struct dpaa2_eth_fq
*fq
)
421 dma_addr_t addr
= dpaa2_fd_get_addr(fd
);
422 u8 fd_format
= dpaa2_fd_get_format(fd
);
425 struct rtnl_link_stats64
*percpu_stats
;
426 struct dpaa2_eth_drv_stats
*percpu_extras
;
427 struct device
*dev
= priv
->net_dev
->dev
.parent
;
428 struct dpaa2_fas
*fas
;
434 trace_dpaa2_rx_fd(priv
->net_dev
, fd
);
436 vaddr
= dpaa2_iova_to_virt(priv
->iommu_domain
, addr
);
437 dma_sync_single_for_cpu(dev
, addr
, priv
->rx_buf_size
,
440 fas
= dpaa2_get_fas(vaddr
, false);
442 buf_data
= vaddr
+ dpaa2_fd_get_offset(fd
);
445 percpu_stats
= this_cpu_ptr(priv
->percpu_stats
);
446 percpu_extras
= this_cpu_ptr(priv
->percpu_extras
);
448 if (fd_format
== dpaa2_fd_single
) {
449 xdp_act
= dpaa2_eth_run_xdp(priv
, ch
, fq
, (struct dpaa2_fd
*)fd
, vaddr
);
450 if (xdp_act
!= XDP_PASS
) {
451 percpu_stats
->rx_packets
++;
452 percpu_stats
->rx_bytes
+= dpaa2_fd_get_len(fd
);
456 dma_unmap_page(dev
, addr
, priv
->rx_buf_size
,
458 skb
= dpaa2_eth_build_linear_skb(ch
, fd
, vaddr
);
459 } else if (fd_format
== dpaa2_fd_sg
) {
460 WARN_ON(priv
->xdp_prog
);
462 dma_unmap_page(dev
, addr
, priv
->rx_buf_size
,
464 skb
= dpaa2_eth_build_frag_skb(priv
, ch
, buf_data
);
465 free_pages((unsigned long)vaddr
, 0);
466 percpu_extras
->rx_sg_frames
++;
467 percpu_extras
->rx_sg_bytes
+= dpaa2_fd_get_len(fd
);
469 /* We don't support any other format */
470 goto err_frame_format
;
478 /* Get the timestamp value */
479 if (priv
->rx_tstamp
) {
480 struct skb_shared_hwtstamps
*shhwtstamps
= skb_hwtstamps(skb
);
481 __le64
*ts
= dpaa2_get_ts(vaddr
, false);
484 memset(shhwtstamps
, 0, sizeof(*shhwtstamps
));
486 ns
= DPAA2_PTP_CLK_PERIOD_NS
* le64_to_cpup(ts
);
487 shhwtstamps
->hwtstamp
= ns_to_ktime(ns
);
490 /* Check if we need to validate the L4 csum */
491 if (likely(dpaa2_fd_get_frc(fd
) & DPAA2_FD_FRC_FASV
)) {
492 status
= le32_to_cpu(fas
->status
);
493 dpaa2_eth_validate_rx_csum(priv
, status
, skb
);
496 skb
->protocol
= eth_type_trans(skb
, priv
->net_dev
);
497 skb_record_rx_queue(skb
, fq
->flowid
);
499 percpu_stats
->rx_packets
++;
500 percpu_stats
->rx_bytes
+= dpaa2_fd_get_len(fd
);
502 list_add_tail(&skb
->list
, ch
->rx_list
);
507 dpaa2_eth_free_rx_fd(priv
, fd
, vaddr
);
509 percpu_stats
->rx_dropped
++;
512 /* Processing of Rx frames received on the error FQ
513 * We check and print the error bits and then free the frame
515 static void dpaa2_eth_rx_err(struct dpaa2_eth_priv
*priv
,
516 struct dpaa2_eth_channel
*ch
,
517 const struct dpaa2_fd
*fd
,
518 struct dpaa2_eth_fq
*fq __always_unused
)
520 struct device
*dev
= priv
->net_dev
->dev
.parent
;
521 dma_addr_t addr
= dpaa2_fd_get_addr(fd
);
522 u8 fd_format
= dpaa2_fd_get_format(fd
);
523 struct rtnl_link_stats64
*percpu_stats
;
524 struct dpaa2_eth_trap_item
*trap_item
;
525 struct dpaa2_fapr
*fapr
;
530 vaddr
= dpaa2_iova_to_virt(priv
->iommu_domain
, addr
);
531 dma_sync_single_for_cpu(dev
, addr
, priv
->rx_buf_size
,
534 buf_data
= vaddr
+ dpaa2_fd_get_offset(fd
);
536 if (fd_format
== dpaa2_fd_single
) {
537 dma_unmap_page(dev
, addr
, priv
->rx_buf_size
,
539 skb
= dpaa2_eth_build_linear_skb(ch
, fd
, vaddr
);
540 } else if (fd_format
== dpaa2_fd_sg
) {
541 dma_unmap_page(dev
, addr
, priv
->rx_buf_size
,
543 skb
= dpaa2_eth_build_frag_skb(priv
, ch
, buf_data
);
544 free_pages((unsigned long)vaddr
, 0);
546 /* We don't support any other format */
547 dpaa2_eth_free_rx_fd(priv
, fd
, vaddr
);
548 goto err_frame_format
;
551 fapr
= dpaa2_get_fapr(vaddr
, false);
552 trap_item
= dpaa2_eth_dl_get_trap(priv
, fapr
);
554 devlink_trap_report(priv
->devlink
, skb
, trap_item
->trap_ctx
,
555 &priv
->devlink_port
, NULL
);
559 percpu_stats
= this_cpu_ptr(priv
->percpu_stats
);
560 percpu_stats
->rx_errors
++;
/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
570 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel
*ch
,
571 struct dpaa2_eth_fq
**src
)
573 struct dpaa2_eth_priv
*priv
= ch
->priv
;
574 struct dpaa2_eth_fq
*fq
= NULL
;
576 const struct dpaa2_fd
*fd
;
577 int cleaned
= 0, retries
= 0;
581 dq
= dpaa2_io_store_next(ch
->store
, &is_last
);
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
588 if (retries
++ >= DPAA2_ETH_SWP_BUSY_RETRIES
) {
589 netdev_err_once(priv
->net_dev
,
590 "Unable to read a valid dequeue response\n");
596 fd
= dpaa2_dq_fd(dq
);
597 fq
= (struct dpaa2_eth_fq
*)(uintptr_t)dpaa2_dq_fqd_ctx(dq
);
599 fq
->consume(priv
, ch
, fd
, fq
);
607 fq
->stats
.frames
+= cleaned
;
608 ch
->stats
.frames
+= cleaned
;
610 /* A dequeue operation only pulls frames from a single queue
611 * into the store. Return the frame queue as an out param.
619 static int dpaa2_eth_ptp_parse(struct sk_buff
*skb
,
620 u8
*msgtype
, u8
*twostep
, u8
*udp
,
621 u16
*correction_offset
,
622 u16
*origintimestamp_offset
)
624 unsigned int ptp_class
;
625 struct ptp_header
*hdr
;
629 ptp_class
= ptp_classify_raw(skb
);
630 if (ptp_class
== PTP_CLASS_NONE
)
633 hdr
= ptp_parse_header(skb
, ptp_class
);
637 *msgtype
= ptp_get_msgtype(hdr
, ptp_class
);
638 *twostep
= hdr
->flag_field
[0] & 0x2;
640 type
= ptp_class
& PTP_CLASS_PMASK
;
641 if (type
== PTP_CLASS_IPV4
||
642 type
== PTP_CLASS_IPV6
)
647 base
= skb_mac_header(skb
);
648 *correction_offset
= (u8
*)&hdr
->correction
- base
;
649 *origintimestamp_offset
= (u8
*)hdr
+ sizeof(struct ptp_header
) - base
;
654 /* Configure the egress frame annotation for timestamp update */
655 static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv
*priv
,
660 struct ptp_tstamp origin_timestamp
;
661 struct dpni_single_step_cfg cfg
;
662 u8 msgtype
, twostep
, udp
;
663 struct dpaa2_faead
*faead
;
664 struct dpaa2_fas
*fas
;
665 struct timespec64 ts
;
666 u16 offset1
, offset2
;
671 /* Mark the egress frame annotation area as valid */
672 frc
= dpaa2_fd_get_frc(fd
);
673 dpaa2_fd_set_frc(fd
, frc
| DPAA2_FD_FRC_FAEADV
);
675 /* Set hardware annotation size */
676 ctrl
= dpaa2_fd_get_ctrl(fd
);
677 dpaa2_fd_set_ctrl(fd
, ctrl
| DPAA2_FD_CTRL_ASAL
);
	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
682 ctrl
= DPAA2_FAEAD_A2V
| DPAA2_FAEAD_UPDV
| DPAA2_FAEAD_UPD
;
683 faead
= dpaa2_get_faead(buf_start
, true);
684 faead
->ctrl
= cpu_to_le32(ctrl
);
686 if (skb
->cb
[0] == TX_TSTAMP_ONESTEP_SYNC
) {
687 if (dpaa2_eth_ptp_parse(skb
, &msgtype
, &twostep
, &udp
,
688 &offset1
, &offset2
) ||
689 msgtype
!= PTP_MSGTYPE_SYNC
|| twostep
) {
690 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
694 /* Mark the frame annotation status as valid */
695 frc
= dpaa2_fd_get_frc(fd
);
696 dpaa2_fd_set_frc(fd
, frc
| DPAA2_FD_FRC_FASV
);
698 /* Mark the PTP flag for one step timestamping */
699 fas
= dpaa2_get_fas(buf_start
, true);
700 fas
->status
= cpu_to_le32(DPAA2_FAS_PTP
);
702 dpaa2_ptp
->caps
.gettime64(&dpaa2_ptp
->caps
, &ts
);
703 ns
= dpaa2_get_ts(buf_start
, true);
704 *ns
= cpu_to_le64(timespec64_to_ns(&ts
) /
705 DPAA2_PTP_CLK_PERIOD_NS
);
707 /* Update current time to PTP message originTimestamp field */
708 ns_to_ptp_tstamp(&origin_timestamp
, le64_to_cpup(ns
));
709 data
= skb_mac_header(skb
);
710 *(__be16
*)(data
+ offset2
) = htons(origin_timestamp
.sec_msb
);
711 *(__be32
*)(data
+ offset2
+ 2) =
712 htonl(origin_timestamp
.sec_lsb
);
713 *(__be32
*)(data
+ offset2
+ 6) = htonl(origin_timestamp
.nsec
);
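		/* The PTP originTimestamp field is a 48-bit seconds value
		 * followed by 32-bit nanoseconds, hence the writes at offset2,
		 * offset2 + 2 and offset2 + 6 above.
		 */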
717 cfg
.offset
= offset1
;
720 if (dpni_set_single_step_cfg(priv
->mc_io
, 0, priv
->mc_token
,
722 WARN_ONCE(1, "Failed to set single step register");
726 /* Create a frame descriptor based on a fragmented skb */
727 static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv
*priv
,
732 struct device
*dev
= priv
->net_dev
->dev
.parent
;
733 void *sgt_buf
= NULL
;
735 int nr_frags
= skb_shinfo(skb
)->nr_frags
;
736 struct dpaa2_sg_entry
*sgt
;
739 struct scatterlist
*scl
, *crt_scl
;
742 struct dpaa2_eth_swa
*swa
;
744 /* Create and map scatterlist.
745 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
746 * to go beyond nr_frags+1.
747 * Note: We don't support chained scatterlists
749 if (unlikely(PAGE_SIZE
/ sizeof(struct scatterlist
) < nr_frags
+ 1))
752 scl
= kmalloc_array(nr_frags
+ 1, sizeof(struct scatterlist
), GFP_ATOMIC
);
756 sg_init_table(scl
, nr_frags
+ 1);
757 num_sg
= skb_to_sgvec(skb
, scl
, 0, skb
->len
);
758 if (unlikely(num_sg
< 0)) {
760 goto dma_map_sg_failed
;
762 num_dma_bufs
= dma_map_sg(dev
, scl
, num_sg
, DMA_BIDIRECTIONAL
);
763 if (unlikely(!num_dma_bufs
)) {
765 goto dma_map_sg_failed
;
768 /* Prepare the HW SGT structure */
769 sgt_buf_size
= priv
->tx_data_offset
+
770 sizeof(struct dpaa2_sg_entry
) * num_dma_bufs
;
771 sgt_buf
= napi_alloc_frag(sgt_buf_size
+ DPAA2_ETH_TX_BUF_ALIGN
);
772 if (unlikely(!sgt_buf
)) {
774 goto sgt_buf_alloc_failed
;
776 sgt_buf
= PTR_ALIGN(sgt_buf
, DPAA2_ETH_TX_BUF_ALIGN
);
777 memset(sgt_buf
, 0, sgt_buf_size
);
779 sgt
= (struct dpaa2_sg_entry
*)(sgt_buf
+ priv
->tx_data_offset
);
781 /* Fill in the HW SGT structure.
783 * sgt_buf is zeroed out, so the following fields are implicit
784 * in all sgt entries:
786 * - format is 'dpaa2_sg_single'
788 for_each_sg(scl
, crt_scl
, num_dma_bufs
, i
) {
789 dpaa2_sg_set_addr(&sgt
[i
], sg_dma_address(crt_scl
));
790 dpaa2_sg_set_len(&sgt
[i
], sg_dma_len(crt_scl
));
792 dpaa2_sg_set_final(&sgt
[i
- 1], true);
794 /* Store the skb backpointer in the SGT buffer.
795 * Fit the scatterlist and the number of buffers alongside the
796 * skb backpointer in the software annotation area. We'll need
797 * all of them on Tx Conf.
799 *swa_addr
= (void *)sgt_buf
;
800 swa
= (struct dpaa2_eth_swa
*)sgt_buf
;
801 swa
->type
= DPAA2_ETH_SWA_SG
;
804 swa
->sg
.num_sg
= num_sg
;
805 swa
->sg
.sgt_size
= sgt_buf_size
;
807 /* Separately map the SGT buffer */
808 addr
= dma_map_single(dev
, sgt_buf
, sgt_buf_size
, DMA_BIDIRECTIONAL
);
809 if (unlikely(dma_mapping_error(dev
, addr
))) {
811 goto dma_map_single_failed
;
813 dpaa2_fd_set_offset(fd
, priv
->tx_data_offset
);
814 dpaa2_fd_set_format(fd
, dpaa2_fd_sg
);
815 dpaa2_fd_set_addr(fd
, addr
);
816 dpaa2_fd_set_len(fd
, skb
->len
);
817 dpaa2_fd_set_ctrl(fd
, FD_CTRL_PTA
);
821 dma_map_single_failed
:
822 skb_free_frag(sgt_buf
);
823 sgt_buf_alloc_failed
:
824 dma_unmap_sg(dev
, scl
, num_sg
, DMA_BIDIRECTIONAL
);
/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
836 static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv
*priv
,
841 struct device
*dev
= priv
->net_dev
->dev
.parent
;
842 struct dpaa2_eth_sgt_cache
*sgt_cache
;
843 struct dpaa2_sg_entry
*sgt
;
844 struct dpaa2_eth_swa
*swa
;
845 dma_addr_t addr
, sgt_addr
;
846 void *sgt_buf
= NULL
;
850 /* Prepare the HW SGT structure */
851 sgt_cache
= this_cpu_ptr(priv
->sgt_cache
);
852 sgt_buf_size
= priv
->tx_data_offset
+ sizeof(struct dpaa2_sg_entry
);
854 if (sgt_cache
->count
== 0)
855 sgt_buf
= kzalloc(sgt_buf_size
+ DPAA2_ETH_TX_BUF_ALIGN
,
858 sgt_buf
= sgt_cache
->buf
[--sgt_cache
->count
];
859 if (unlikely(!sgt_buf
))
862 sgt_buf
= PTR_ALIGN(sgt_buf
, DPAA2_ETH_TX_BUF_ALIGN
);
863 sgt
= (struct dpaa2_sg_entry
*)(sgt_buf
+ priv
->tx_data_offset
);
865 addr
= dma_map_single(dev
, skb
->data
, skb
->len
, DMA_BIDIRECTIONAL
);
866 if (unlikely(dma_mapping_error(dev
, addr
))) {
868 goto data_map_failed
;
871 /* Fill in the HW SGT structure */
872 dpaa2_sg_set_addr(sgt
, addr
);
873 dpaa2_sg_set_len(sgt
, skb
->len
);
874 dpaa2_sg_set_final(sgt
, true);
876 /* Store the skb backpointer in the SGT buffer */
877 *swa_addr
= (void *)sgt_buf
;
878 swa
= (struct dpaa2_eth_swa
*)sgt_buf
;
879 swa
->type
= DPAA2_ETH_SWA_SINGLE
;
880 swa
->single
.skb
= skb
;
881 swa
->single
.sgt_size
= sgt_buf_size
;
883 /* Separately map the SGT buffer */
884 sgt_addr
= dma_map_single(dev
, sgt_buf
, sgt_buf_size
, DMA_BIDIRECTIONAL
);
885 if (unlikely(dma_mapping_error(dev
, sgt_addr
))) {
890 dpaa2_fd_set_offset(fd
, priv
->tx_data_offset
);
891 dpaa2_fd_set_format(fd
, dpaa2_fd_sg
);
892 dpaa2_fd_set_addr(fd
, sgt_addr
);
893 dpaa2_fd_set_len(fd
, skb
->len
);
894 dpaa2_fd_set_ctrl(fd
, FD_CTRL_PTA
);
899 dma_unmap_single(dev
, addr
, skb
->len
, DMA_BIDIRECTIONAL
);
901 if (sgt_cache
->count
>= DPAA2_ETH_SGT_CACHE_SIZE
)
904 sgt_cache
->buf
[sgt_cache
->count
++] = sgt_buf
;
909 /* Create a frame descriptor based on a linear skb */
910 static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv
*priv
,
915 struct device
*dev
= priv
->net_dev
->dev
.parent
;
916 u8
*buffer_start
, *aligned_start
;
917 struct dpaa2_eth_swa
*swa
;
920 buffer_start
= skb
->data
- dpaa2_eth_needed_headroom(skb
);
922 /* If there's enough room to align the FD address, do it.
923 * It will help hardware optimize accesses.
925 aligned_start
= PTR_ALIGN(buffer_start
- DPAA2_ETH_TX_BUF_ALIGN
,
926 DPAA2_ETH_TX_BUF_ALIGN
);
927 if (aligned_start
>= skb
->head
)
928 buffer_start
= aligned_start
;
930 /* Store a backpointer to the skb at the beginning of the buffer
931 * (in the private data area) such that we can release it
934 *swa_addr
= (void *)buffer_start
;
935 swa
= (struct dpaa2_eth_swa
*)buffer_start
;
936 swa
->type
= DPAA2_ETH_SWA_SINGLE
;
937 swa
->single
.skb
= skb
;
939 addr
= dma_map_single(dev
, buffer_start
,
940 skb_tail_pointer(skb
) - buffer_start
,
942 if (unlikely(dma_mapping_error(dev
, addr
)))
945 dpaa2_fd_set_addr(fd
, addr
);
946 dpaa2_fd_set_offset(fd
, (u16
)(skb
->data
- buffer_start
));
947 dpaa2_fd_set_len(fd
, skb
->len
);
948 dpaa2_fd_set_format(fd
, dpaa2_fd_single
);
949 dpaa2_fd_set_ctrl(fd
, FD_CTRL_PTA
);
954 /* FD freeing routine on the Tx path
956 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
957 * back-pointed to is also freed.
958 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
961 static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv
*priv
,
962 struct dpaa2_eth_fq
*fq
,
963 const struct dpaa2_fd
*fd
, bool in_napi
)
965 struct device
*dev
= priv
->net_dev
->dev
.parent
;
966 dma_addr_t fd_addr
, sg_addr
;
967 struct sk_buff
*skb
= NULL
;
968 unsigned char *buffer_start
;
969 struct dpaa2_eth_swa
*swa
;
970 u8 fd_format
= dpaa2_fd_get_format(fd
);
971 u32 fd_len
= dpaa2_fd_get_len(fd
);
973 struct dpaa2_eth_sgt_cache
*sgt_cache
;
974 struct dpaa2_sg_entry
*sgt
;
976 fd_addr
= dpaa2_fd_get_addr(fd
);
977 buffer_start
= dpaa2_iova_to_virt(priv
->iommu_domain
, fd_addr
);
978 swa
= (struct dpaa2_eth_swa
*)buffer_start
;
980 if (fd_format
== dpaa2_fd_single
) {
981 if (swa
->type
== DPAA2_ETH_SWA_SINGLE
) {
982 skb
= swa
->single
.skb
;
983 /* Accessing the skb buffer is safe before dma unmap,
984 * because we didn't map the actual skb shell.
986 dma_unmap_single(dev
, fd_addr
,
987 skb_tail_pointer(skb
) - buffer_start
,
990 WARN_ONCE(swa
->type
!= DPAA2_ETH_SWA_XDP
, "Wrong SWA type");
991 dma_unmap_single(dev
, fd_addr
, swa
->xdp
.dma_size
,
994 } else if (fd_format
== dpaa2_fd_sg
) {
995 if (swa
->type
== DPAA2_ETH_SWA_SG
) {
998 /* Unmap the scatterlist */
999 dma_unmap_sg(dev
, swa
->sg
.scl
, swa
->sg
.num_sg
,
1003 /* Unmap the SGT buffer */
1004 dma_unmap_single(dev
, fd_addr
, swa
->sg
.sgt_size
,
1007 skb
= swa
->single
.skb
;
1009 /* Unmap the SGT Buffer */
1010 dma_unmap_single(dev
, fd_addr
, swa
->single
.sgt_size
,
1013 sgt
= (struct dpaa2_sg_entry
*)(buffer_start
+
1014 priv
->tx_data_offset
);
1015 sg_addr
= dpaa2_sg_get_addr(sgt
);
1016 dma_unmap_single(dev
, sg_addr
, skb
->len
, DMA_BIDIRECTIONAL
);
1019 netdev_dbg(priv
->net_dev
, "Invalid FD format\n");
1023 if (swa
->type
!= DPAA2_ETH_SWA_XDP
&& in_napi
) {
1025 fq
->dq_bytes
+= fd_len
;
1028 if (swa
->type
== DPAA2_ETH_SWA_XDP
) {
1029 xdp_return_frame(swa
->xdp
.xdpf
);
1033 /* Get the timestamp value */
1034 if (skb
->cb
[0] == TX_TSTAMP
) {
1035 struct skb_shared_hwtstamps shhwtstamps
;
1036 __le64
*ts
= dpaa2_get_ts(buffer_start
, true);
1039 memset(&shhwtstamps
, 0, sizeof(shhwtstamps
));
1041 ns
= DPAA2_PTP_CLK_PERIOD_NS
* le64_to_cpup(ts
);
1042 shhwtstamps
.hwtstamp
= ns_to_ktime(ns
);
1043 skb_tstamp_tx(skb
, &shhwtstamps
);
1044 } else if (skb
->cb
[0] == TX_TSTAMP_ONESTEP_SYNC
) {
1045 mutex_unlock(&priv
->onestep_tstamp_lock
);
1048 /* Free SGT buffer allocated on tx */
1049 if (fd_format
!= dpaa2_fd_single
) {
1050 sgt_cache
= this_cpu_ptr(priv
->sgt_cache
);
1051 if (swa
->type
== DPAA2_ETH_SWA_SG
) {
1052 skb_free_frag(buffer_start
);
1054 if (sgt_cache
->count
>= DPAA2_ETH_SGT_CACHE_SIZE
)
1055 kfree(buffer_start
);
1057 sgt_cache
->buf
[sgt_cache
->count
++] = buffer_start
;
1061 /* Move on with skb release */
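	/* napi_consume_skb() can batch the free when we run in NAPI context
	 * (in_napi set) and falls back to a regular free otherwise.
	 */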
1062 napi_consume_skb(skb
, in_napi
);
1065 static netdev_tx_t
__dpaa2_eth_tx(struct sk_buff
*skb
,
1066 struct net_device
*net_dev
)
1068 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1070 struct rtnl_link_stats64
*percpu_stats
;
1071 struct dpaa2_eth_drv_stats
*percpu_extras
;
1072 struct dpaa2_eth_fq
*fq
;
1073 struct netdev_queue
*nq
;
1075 unsigned int needed_headroom
;
1081 percpu_stats
= this_cpu_ptr(priv
->percpu_stats
);
1082 percpu_extras
= this_cpu_ptr(priv
->percpu_extras
);
1084 needed_headroom
= dpaa2_eth_needed_headroom(skb
);
1086 /* We'll be holding a back-reference to the skb until Tx Confirmation;
1087 * we don't want that overwritten by a concurrent Tx with a cloned skb.
1089 skb
= skb_unshare(skb
, GFP_ATOMIC
);
1090 if (unlikely(!skb
)) {
1091 /* skb_unshare() has already freed the skb */
1092 percpu_stats
->tx_dropped
++;
1093 return NETDEV_TX_OK
;
1096 /* Setup the FD fields */
1097 memset(&fd
, 0, sizeof(fd
));
1099 if (skb_is_nonlinear(skb
)) {
1100 err
= dpaa2_eth_build_sg_fd(priv
, skb
, &fd
, &swa
);
1101 percpu_extras
->tx_sg_frames
++;
1102 percpu_extras
->tx_sg_bytes
+= skb
->len
;
1103 } else if (skb_headroom(skb
) < needed_headroom
) {
1104 err
= dpaa2_eth_build_sg_fd_single_buf(priv
, skb
, &fd
, &swa
);
1105 percpu_extras
->tx_sg_frames
++;
1106 percpu_extras
->tx_sg_bytes
+= skb
->len
;
1107 percpu_extras
->tx_converted_sg_frames
++;
1108 percpu_extras
->tx_converted_sg_bytes
+= skb
->len
;
1110 err
= dpaa2_eth_build_single_fd(priv
, skb
, &fd
, &swa
);
1113 if (unlikely(err
)) {
1114 percpu_stats
->tx_dropped
++;
1119 dpaa2_eth_enable_tx_tstamp(priv
, &fd
, swa
, skb
);
1122 trace_dpaa2_tx_fd(net_dev
, &fd
);
1124 /* TxConf FQ selection relies on queue id from the stack.
1125 * In case of a forwarded frame from another DPNI interface, we choose
1126 * a queue affined to the same core that processed the Rx frame
1128 queue_mapping
= skb_get_queue_mapping(skb
);
1130 if (net_dev
->num_tc
) {
1131 prio
= netdev_txq_to_tc(net_dev
, queue_mapping
);
1132 /* Hardware interprets priority level 0 as being the highest,
1133 * so we need to do a reverse mapping to the netdev tc index
1135 prio
= net_dev
->num_tc
- prio
- 1;
1136 /* We have only one FQ array entry for all Tx hardware queues
1137 * with the same flow id (but different priority levels)
1139 queue_mapping
%= dpaa2_eth_queue_count(priv
);
1141 fq
= &priv
->fq
[queue_mapping
];
1143 fd_len
= dpaa2_fd_get_len(&fd
);
1144 nq
= netdev_get_tx_queue(net_dev
, queue_mapping
);
1145 netdev_tx_sent_queue(nq
, fd_len
);
1147 /* Everything that happens after this enqueues might race with
1148 * the Tx confirmation callback for this frame
1150 for (i
= 0; i
< DPAA2_ETH_ENQUEUE_RETRIES
; i
++) {
1151 err
= priv
->enqueue(priv
, fq
, &fd
, prio
, 1, NULL
);
1155 percpu_extras
->tx_portal_busy
+= i
;
1156 if (unlikely(err
< 0)) {
1157 percpu_stats
->tx_errors
++;
1158 /* Clean up everything, including freeing the skb */
1159 dpaa2_eth_free_tx_fd(priv
, fq
, &fd
, false);
1160 netdev_tx_completed_queue(nq
, 1, fd_len
);
1162 percpu_stats
->tx_packets
++;
1163 percpu_stats
->tx_bytes
+= fd_len
;
1166 return NETDEV_TX_OK
;
1171 return NETDEV_TX_OK
;
1174 static void dpaa2_eth_tx_onestep_tstamp(struct work_struct
*work
)
1176 struct dpaa2_eth_priv
*priv
= container_of(work
, struct dpaa2_eth_priv
,
1178 struct sk_buff
*skb
;
1181 skb
= skb_dequeue(&priv
->tx_skbs
);
1185 /* Lock just before TX one-step timestamping packet,
1186 * and release the lock in dpaa2_eth_free_tx_fd when
1187 * confirm the packet has been sent on hardware, or
1188 * when clean up during transmit failure.
1190 mutex_lock(&priv
->onestep_tstamp_lock
);
1191 __dpaa2_eth_tx(skb
, priv
->net_dev
);
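	/* The mutex taken above is released in dpaa2_eth_free_tx_fd() once the
	 * frame is confirmed by hardware (or cleaned up on a transmit error),
	 * so one-step Sync frames are strictly serialized.
	 */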
1195 static netdev_tx_t
dpaa2_eth_tx(struct sk_buff
*skb
, struct net_device
*net_dev
)
1197 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1198 u8 msgtype
, twostep
, udp
;
1199 u16 offset1
, offset2
;
1201 /* Utilize skb->cb[0] for timestamping request per skb */
1204 if ((skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
) && dpaa2_ptp
) {
1205 if (priv
->tx_tstamp_type
== HWTSTAMP_TX_ON
)
1206 skb
->cb
[0] = TX_TSTAMP
;
1207 else if (priv
->tx_tstamp_type
== HWTSTAMP_TX_ONESTEP_SYNC
)
1208 skb
->cb
[0] = TX_TSTAMP_ONESTEP_SYNC
;
1211 /* TX for one-step timestamping PTP Sync packet */
1212 if (skb
->cb
[0] == TX_TSTAMP_ONESTEP_SYNC
) {
1213 if (!dpaa2_eth_ptp_parse(skb
, &msgtype
, &twostep
, &udp
,
1214 &offset1
, &offset2
))
1215 if (msgtype
== PTP_MSGTYPE_SYNC
&& twostep
== 0) {
1216 skb_queue_tail(&priv
->tx_skbs
, skb
);
1217 queue_work(priv
->dpaa2_ptp_wq
,
1218 &priv
->tx_onestep_tstamp
);
1219 return NETDEV_TX_OK
;
1221 /* Use two-step timestamping if not one-step timestamping
1224 skb
->cb
[0] = TX_TSTAMP
;
1227 /* TX for other packets */
1228 return __dpaa2_eth_tx(skb
, net_dev
);
1231 /* Tx confirmation frame processing routine */
1232 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv
*priv
,
1233 struct dpaa2_eth_channel
*ch __always_unused
,
1234 const struct dpaa2_fd
*fd
,
1235 struct dpaa2_eth_fq
*fq
)
1237 struct rtnl_link_stats64
*percpu_stats
;
1238 struct dpaa2_eth_drv_stats
*percpu_extras
;
1239 u32 fd_len
= dpaa2_fd_get_len(fd
);
1243 trace_dpaa2_tx_conf_fd(priv
->net_dev
, fd
);
1245 percpu_extras
= this_cpu_ptr(priv
->percpu_extras
);
1246 percpu_extras
->tx_conf_frames
++;
1247 percpu_extras
->tx_conf_bytes
+= fd_len
;
1249 /* Check frame errors in the FD field */
1250 fd_errors
= dpaa2_fd_get_ctrl(fd
) & DPAA2_FD_TX_ERR_MASK
;
1251 dpaa2_eth_free_tx_fd(priv
, fq
, fd
, true);
1253 if (likely(!fd_errors
))
1256 if (net_ratelimit())
1257 netdev_dbg(priv
->net_dev
, "TX frame FD error: 0x%08x\n",
1260 percpu_stats
= this_cpu_ptr(priv
->percpu_stats
);
1261 /* Tx-conf logically pertains to the egress path. */
1262 percpu_stats
->tx_errors
++;
1265 static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv
*priv
, bool enable
)
1269 err
= dpni_set_offload(priv
->mc_io
, 0, priv
->mc_token
,
1270 DPNI_OFF_RX_L3_CSUM
, enable
);
1272 netdev_err(priv
->net_dev
,
1273 "dpni_set_offload(RX_L3_CSUM) failed\n");
1277 err
= dpni_set_offload(priv
->mc_io
, 0, priv
->mc_token
,
1278 DPNI_OFF_RX_L4_CSUM
, enable
);
1280 netdev_err(priv
->net_dev
,
1281 "dpni_set_offload(RX_L4_CSUM) failed\n");
1288 static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv
*priv
, bool enable
)
1292 err
= dpni_set_offload(priv
->mc_io
, 0, priv
->mc_token
,
1293 DPNI_OFF_TX_L3_CSUM
, enable
);
1295 netdev_err(priv
->net_dev
, "dpni_set_offload(TX_L3_CSUM) failed\n");
1299 err
= dpni_set_offload(priv
->mc_io
, 0, priv
->mc_token
,
1300 DPNI_OFF_TX_L4_CSUM
, enable
);
1302 netdev_err(priv
->net_dev
, "dpni_set_offload(TX_L4_CSUM) failed\n");
1309 /* Perform a single release command to add buffers
1310 * to the specified buffer pool
1312 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv
*priv
,
1313 struct dpaa2_eth_channel
*ch
, u16 bpid
)
1315 struct device
*dev
= priv
->net_dev
->dev
.parent
;
1316 u64 buf_array
[DPAA2_ETH_BUFS_PER_CMD
];
1322 for (i
= 0; i
< DPAA2_ETH_BUFS_PER_CMD
; i
++) {
1323 /* Allocate buffer visible to WRIOP + skb shared info +
1326 /* allocate one page for each Rx buffer. WRIOP sees
1327 * the entire page except for a tailroom reserved for
1330 page
= dev_alloc_pages(0);
1334 addr
= dma_map_page(dev
, page
, 0, priv
->rx_buf_size
,
1336 if (unlikely(dma_mapping_error(dev
, addr
)))
1339 buf_array
[i
] = addr
;
1342 trace_dpaa2_eth_buf_seed(priv
->net_dev
,
1343 page
, DPAA2_ETH_RX_BUF_RAW_SIZE
,
1344 addr
, priv
->rx_buf_size
,
1349 /* In case the portal is busy, retry until successful */
1350 while ((err
= dpaa2_io_service_release(ch
->dpio
, bpid
,
1351 buf_array
, i
)) == -EBUSY
) {
1352 if (retries
++ >= DPAA2_ETH_SWP_BUSY_RETRIES
)
1357 /* If release command failed, clean up and bail out;
1358 * not much else we can do about it
1361 dpaa2_eth_free_bufs(priv
, buf_array
, i
);
1368 __free_pages(page
, 0);
1370 /* If we managed to allocate at least some buffers,
1371 * release them to hardware
1379 static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv
*priv
, u16 bpid
)
1384 for (j
= 0; j
< priv
->num_channels
; j
++) {
1385 for (i
= 0; i
< DPAA2_ETH_NUM_BUFS
;
1386 i
+= DPAA2_ETH_BUFS_PER_CMD
) {
1387 new_count
= dpaa2_eth_add_bufs(priv
, priv
->channel
[j
], bpid
);
1388 priv
->channel
[j
]->buf_count
+= new_count
;
1390 if (new_count
< DPAA2_ETH_BUFS_PER_CMD
) {
/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
1403 static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv
*priv
, int count
)
1405 u64 buf_array
[DPAA2_ETH_BUFS_PER_CMD
];
1410 ret
= dpaa2_io_service_acquire(NULL
, priv
->bpid
,
1413 if (ret
== -EBUSY
&&
1414 retries
++ < DPAA2_ETH_SWP_BUSY_RETRIES
)
1416 netdev_err(priv
->net_dev
, "dpaa2_io_service_acquire() failed\n");
1419 dpaa2_eth_free_bufs(priv
, buf_array
, ret
);
1424 static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv
*priv
)
1428 dpaa2_eth_drain_bufs(priv
, DPAA2_ETH_BUFS_PER_CMD
);
1429 dpaa2_eth_drain_bufs(priv
, 1);
1431 for (i
= 0; i
< priv
->num_channels
; i
++)
1432 priv
->channel
[i
]->buf_count
= 0;
1435 /* Function is called from softirq context only, so we don't need to guard
1436 * the access to percpu count
1438 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv
*priv
,
1439 struct dpaa2_eth_channel
*ch
,
1444 if (likely(ch
->buf_count
>= DPAA2_ETH_REFILL_THRESH
))
1448 new_count
= dpaa2_eth_add_bufs(priv
, ch
, bpid
);
1449 if (unlikely(!new_count
)) {
1450 /* Out of memory; abort for now, we'll try later on */
1453 ch
->buf_count
+= new_count
;
1454 } while (ch
->buf_count
< DPAA2_ETH_NUM_BUFS
);
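	/* The refill tops the pool back up to DPAA2_ETH_NUM_BUFS once it has
	 * dropped below DPAA2_ETH_REFILL_THRESH (checked above).
	 */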
1456 if (unlikely(ch
->buf_count
< DPAA2_ETH_NUM_BUFS
))
1462 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv
*priv
)
1464 struct dpaa2_eth_sgt_cache
*sgt_cache
;
1468 for_each_possible_cpu(k
) {
1469 sgt_cache
= per_cpu_ptr(priv
->sgt_cache
, k
);
1470 count
= sgt_cache
->count
;
1472 for (i
= 0; i
< count
; i
++)
1473 kfree(sgt_cache
->buf
[i
]);
1474 sgt_cache
->count
= 0;
1478 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel
*ch
)
1483 /* Retry while portal is busy */
1485 err
= dpaa2_io_service_pull_channel(ch
->dpio
, ch
->ch_id
,
1489 } while (err
== -EBUSY
&& dequeues
< DPAA2_ETH_SWP_BUSY_RETRIES
);
1491 ch
->stats
.dequeue_portal_busy
+= dequeues
;
1493 ch
->stats
.pull_err
++;
1498 /* NAPI poll routine
1500 * Frames are dequeued from the QMan channel associated with this NAPI context.
1501 * Rx, Tx confirmation and (if configured) Rx error frames all count
1502 * towards the NAPI budget.
1504 static int dpaa2_eth_poll(struct napi_struct
*napi
, int budget
)
1506 struct dpaa2_eth_channel
*ch
;
1507 struct dpaa2_eth_priv
*priv
;
1508 int rx_cleaned
= 0, txconf_cleaned
= 0;
1509 struct dpaa2_eth_fq
*fq
, *txc_fq
= NULL
;
1510 struct netdev_queue
*nq
;
1511 int store_cleaned
, work_done
;
1512 struct list_head rx_list
;
1517 ch
= container_of(napi
, struct dpaa2_eth_channel
, napi
);
1521 INIT_LIST_HEAD(&rx_list
);
1522 ch
->rx_list
= &rx_list
;
1525 err
= dpaa2_eth_pull_channel(ch
);
1529 /* Refill pool if appropriate */
1530 dpaa2_eth_refill_pool(priv
, ch
, priv
->bpid
);
1532 store_cleaned
= dpaa2_eth_consume_frames(ch
, &fq
);
1533 if (store_cleaned
<= 0)
1535 if (fq
->type
== DPAA2_RX_FQ
) {
1536 rx_cleaned
+= store_cleaned
;
1537 flowid
= fq
->flowid
;
1539 txconf_cleaned
+= store_cleaned
;
1540 /* We have a single Tx conf FQ on this channel */
1544 /* If we either consumed the whole NAPI budget with Rx frames
1545 * or we reached the Tx confirmations threshold, we're done.
1547 if (rx_cleaned
>= budget
||
1548 txconf_cleaned
>= DPAA2_ETH_TXCONF_PER_NAPI
) {
1552 } while (store_cleaned
);
1554 /* We didn't consume the entire budget, so finish napi and
1555 * re-enable data availability notifications
1557 napi_complete_done(napi
, rx_cleaned
);
1559 err
= dpaa2_io_service_rearm(ch
->dpio
, &ch
->nctx
);
1561 } while (err
== -EBUSY
&& retries
++ < DPAA2_ETH_SWP_BUSY_RETRIES
);
1562 WARN_ONCE(err
, "CDAN notifications rearm failed on core %d",
1563 ch
->nctx
.desired_cpu
);
1565 work_done
= max(rx_cleaned
, 1);
1568 netif_receive_skb_list(ch
->rx_list
);
1570 if (txc_fq
&& txc_fq
->dq_frames
) {
1571 nq
= netdev_get_tx_queue(priv
->net_dev
, txc_fq
->flowid
);
1572 netdev_tx_completed_queue(nq
, txc_fq
->dq_frames
,
1574 txc_fq
->dq_frames
= 0;
1575 txc_fq
->dq_bytes
= 0;
1578 if (ch
->xdp
.res
& XDP_REDIRECT
)
1580 else if (rx_cleaned
&& ch
->xdp
.res
& XDP_TX
)
1581 dpaa2_eth_xdp_tx_flush(priv
, ch
, &priv
->fq
[flowid
]);
1586 static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv
*priv
)
1588 struct dpaa2_eth_channel
*ch
;
1591 for (i
= 0; i
< priv
->num_channels
; i
++) {
1592 ch
= priv
->channel
[i
];
1593 napi_enable(&ch
->napi
);
1597 static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv
*priv
)
1599 struct dpaa2_eth_channel
*ch
;
1602 for (i
= 0; i
< priv
->num_channels
; i
++) {
1603 ch
= priv
->channel
[i
];
1604 napi_disable(&ch
->napi
);
1608 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv
*priv
,
1609 bool tx_pause
, bool pfc
)
1611 struct dpni_taildrop td
= {0};
1612 struct dpaa2_eth_fq
*fq
;
1615 /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
1616 * flow control is disabled (as it might interfere with either the
1617 * buffer pool depletion trigger for pause frames or with the group
1618 * congestion trigger for PFC frames)
1620 td
.enable
= !tx_pause
;
1621 if (priv
->rx_fqtd_enabled
== td
.enable
)
1624 td
.threshold
= DPAA2_ETH_FQ_TAILDROP_THRESH
;
1625 td
.units
= DPNI_CONGESTION_UNIT_BYTES
;
1627 for (i
= 0; i
< priv
->num_fqs
; i
++) {
1629 if (fq
->type
!= DPAA2_RX_FQ
)
1631 err
= dpni_set_taildrop(priv
->mc_io
, 0, priv
->mc_token
,
1632 DPNI_CP_QUEUE
, DPNI_QUEUE_RX
,
1633 fq
->tc
, fq
->flowid
, &td
);
1635 netdev_err(priv
->net_dev
,
1636 "dpni_set_taildrop(FQ) failed\n");
1641 priv
->rx_fqtd_enabled
= td
.enable
;
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
1651 td
.enable
= !tx_pause
|| (tx_pause
&& pfc
);
1652 if (priv
->rx_cgtd_enabled
== td
.enable
)
1655 td
.threshold
= DPAA2_ETH_CG_TAILDROP_THRESH(priv
);
1656 td
.units
= DPNI_CONGESTION_UNIT_FRAMES
;
1657 for (i
= 0; i
< dpaa2_eth_tc_count(priv
); i
++) {
1658 err
= dpni_set_taildrop(priv
->mc_io
, 0, priv
->mc_token
,
1659 DPNI_CP_GROUP
, DPNI_QUEUE_RX
,
1662 netdev_err(priv
->net_dev
,
1663 "dpni_set_taildrop(CG) failed\n");
1668 priv
->rx_cgtd_enabled
= td
.enable
;
1671 static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv
*priv
)
1673 struct dpni_link_state state
= {0};
1677 err
= dpni_get_link_state(priv
->mc_io
, 0, priv
->mc_token
, &state
);
1678 if (unlikely(err
)) {
1679 netdev_err(priv
->net_dev
,
1680 "dpni_get_link_state() failed\n");
1684 /* If Tx pause frame settings have changed, we need to update
1685 * Rx FQ taildrop configuration as well. We configure taildrop
1686 * only when pause frame generation is disabled.
1688 tx_pause
= dpaa2_eth_tx_pause_enabled(state
.options
);
1689 dpaa2_eth_set_rx_taildrop(priv
, tx_pause
, priv
->pfc_enabled
);
1691 /* When we manage the MAC/PHY using phylink there is no need
1692 * to manually update the netif_carrier.
	/* Check link state; speed / duplex changes are not treated yet */
1698 if (priv
->link_state
.up
== state
.up
)
1702 netif_carrier_on(priv
->net_dev
);
1703 netif_tx_start_all_queues(priv
->net_dev
);
1705 netif_tx_stop_all_queues(priv
->net_dev
);
1706 netif_carrier_off(priv
->net_dev
);
1709 netdev_info(priv
->net_dev
, "Link Event: state %s\n",
1710 state
.up
? "up" : "down");
1713 priv
->link_state
= state
;
1718 static int dpaa2_eth_open(struct net_device
*net_dev
)
1720 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1723 err
= dpaa2_eth_seed_pool(priv
, priv
->bpid
);
1725 /* Not much to do; the buffer pool, though not filled up,
1726 * may still contain some buffers which would enable us
1729 netdev_err(net_dev
, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1730 priv
->dpbp_dev
->obj_desc
.id
, priv
->bpid
);
1734 /* We'll only start the txqs when the link is actually ready;
1735 * make sure we don't race against the link up notification,
1736 * which may come immediately after dpni_enable();
1738 netif_tx_stop_all_queues(net_dev
);
1740 /* Also, explicitly set carrier off, otherwise
1741 * netif_carrier_ok() will return true and cause 'ip link show'
1742 * to report the LOWER_UP flag, even though the link
1743 * notification wasn't even received.
1745 netif_carrier_off(net_dev
);
1747 dpaa2_eth_enable_ch_napi(priv
);
1749 err
= dpni_enable(priv
->mc_io
, 0, priv
->mc_token
);
1751 netdev_err(net_dev
, "dpni_enable() failed\n");
1756 phylink_start(priv
->mac
->phylink
);
1761 dpaa2_eth_disable_ch_napi(priv
);
1762 dpaa2_eth_drain_pool(priv
);
1766 /* Total number of in-flight frames on ingress queues */
1767 static u32
dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv
*priv
)
1769 struct dpaa2_eth_fq
*fq
;
1770 u32 fcnt
= 0, bcnt
= 0, total
= 0;
1773 for (i
= 0; i
< priv
->num_fqs
; i
++) {
1775 err
= dpaa2_io_query_fq_count(NULL
, fq
->fqid
, &fcnt
, &bcnt
);
1777 netdev_warn(priv
->net_dev
, "query_fq_count failed");
1786 static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv
*priv
)
1792 pending
= dpaa2_eth_ingress_fq_count(priv
);
1795 } while (pending
&& --retries
);
1798 #define DPNI_TX_PENDING_VER_MAJOR 7
1799 #define DPNI_TX_PENDING_VER_MINOR 13
1800 static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv
*priv
)
1802 union dpni_statistics stats
;
1806 if (dpaa2_eth_cmp_dpni_ver(priv
, DPNI_TX_PENDING_VER_MAJOR
,
1807 DPNI_TX_PENDING_VER_MINOR
) < 0)
1811 err
= dpni_get_statistics(priv
->mc_io
, 0, priv
->mc_token
, 6,
1815 if (stats
.page_6
.tx_pending_frames
== 0)
1817 } while (--retries
);
1823 static int dpaa2_eth_stop(struct net_device
*net_dev
)
1825 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1826 int dpni_enabled
= 0;
1830 netif_tx_stop_all_queues(net_dev
);
1831 netif_carrier_off(net_dev
);
1833 phylink_stop(priv
->mac
->phylink
);
	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 *   of all in flight Tx frames is finished (and corresponding Tx conf
	 *   frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
1846 dpaa2_eth_wait_for_egress_fq_empty(priv
);
1849 dpni_disable(priv
->mc_io
, 0, priv
->mc_token
);
1850 dpni_is_enabled(priv
->mc_io
, 0, priv
->mc_token
, &dpni_enabled
);
1852 /* Allow the hardware some slack */
1854 } while (dpni_enabled
&& --retries
);
1856 netdev_warn(net_dev
, "Retry count exceeded disabling DPNI\n");
1857 /* Must go on and disable NAPI nonetheless, so we don't crash at
1858 * the next "ifconfig up"
1862 dpaa2_eth_wait_for_ingress_fq_empty(priv
);
1863 dpaa2_eth_disable_ch_napi(priv
);
1865 /* Empty the buffer pool */
1866 dpaa2_eth_drain_pool(priv
);
1868 /* Empty the Scatter-Gather Buffer cache */
1869 dpaa2_eth_sgt_cache_drain(priv
);
1874 static int dpaa2_eth_set_addr(struct net_device
*net_dev
, void *addr
)
1876 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1877 struct device
*dev
= net_dev
->dev
.parent
;
1880 err
= eth_mac_addr(net_dev
, addr
);
1882 dev_err(dev
, "eth_mac_addr() failed (%d)\n", err
);
1886 err
= dpni_set_primary_mac_addr(priv
->mc_io
, 0, priv
->mc_token
,
1889 dev_err(dev
, "dpni_set_primary_mac_addr() failed (%d)\n", err
);
1896 /** Fill in counters maintained by the GPP driver. These may be different from
1897 * the hardware counters obtained by ethtool.
1899 static void dpaa2_eth_get_stats(struct net_device
*net_dev
,
1900 struct rtnl_link_stats64
*stats
)
1902 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1903 struct rtnl_link_stats64
*percpu_stats
;
1905 u64
*netstats
= (u64
*)stats
;
1907 int num
= sizeof(struct rtnl_link_stats64
) / sizeof(u64
);
1909 for_each_possible_cpu(i
) {
1910 percpu_stats
= per_cpu_ptr(priv
->percpu_stats
, i
);
1911 cpustats
= (u64
*)percpu_stats
;
1912 for (j
= 0; j
< num
; j
++)
1913 netstats
[j
] += cpustats
[j
];
1917 /* Copy mac unicast addresses from @net_dev to @priv.
1918 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1920 static void dpaa2_eth_add_uc_hw_addr(const struct net_device
*net_dev
,
1921 struct dpaa2_eth_priv
*priv
)
1923 struct netdev_hw_addr
*ha
;
1926 netdev_for_each_uc_addr(ha
, net_dev
) {
1927 err
= dpni_add_mac_addr(priv
->mc_io
, 0, priv
->mc_token
,
1930 netdev_warn(priv
->net_dev
,
1931 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1936 /* Copy mac multicast addresses from @net_dev to @priv
1937 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1939 static void dpaa2_eth_add_mc_hw_addr(const struct net_device
*net_dev
,
1940 struct dpaa2_eth_priv
*priv
)
1942 struct netdev_hw_addr
*ha
;
1945 netdev_for_each_mc_addr(ha
, net_dev
) {
1946 err
= dpni_add_mac_addr(priv
->mc_io
, 0, priv
->mc_token
,
1949 netdev_warn(priv
->net_dev
,
1950 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1955 static void dpaa2_eth_set_rx_mode(struct net_device
*net_dev
)
1957 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
1958 int uc_count
= netdev_uc_count(net_dev
);
1959 int mc_count
= netdev_mc_count(net_dev
);
1960 u8 max_mac
= priv
->dpni_attrs
.mac_filter_entries
;
1961 u32 options
= priv
->dpni_attrs
.options
;
1962 u16 mc_token
= priv
->mc_token
;
1963 struct fsl_mc_io
*mc_io
= priv
->mc_io
;
1966 /* Basic sanity checks; these probably indicate a misconfiguration */
1967 if (options
& DPNI_OPT_NO_MAC_FILTER
&& max_mac
!= 0)
1968 netdev_info(net_dev
,
1969 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1972 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1973 if (uc_count
> max_mac
) {
1974 netdev_info(net_dev
,
1975 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1979 if (mc_count
+ uc_count
> max_mac
) {
1980 netdev_info(net_dev
,
1981 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1982 uc_count
+ mc_count
, max_mac
);
1983 goto force_mc_promisc
;
1986 /* Adjust promisc settings due to flag combinations */
1987 if (net_dev
->flags
& IFF_PROMISC
)
1989 if (net_dev
->flags
& IFF_ALLMULTI
) {
1990 /* First, rebuild unicast filtering table. This should be done
1991 * in promisc mode, in order to avoid frame loss while we
1992 * progressively add entries to the table.
1993 * We don't know whether we had been in promisc already, and
1994 * making an MC call to find out is expensive; so set uc promisc
1997 err
= dpni_set_unicast_promisc(mc_io
, 0, mc_token
, 1);
1999 netdev_warn(net_dev
, "Can't set uc promisc\n");
2001 /* Actual uc table reconstruction. */
2002 err
= dpni_clear_mac_filters(mc_io
, 0, mc_token
, 1, 0);
2004 netdev_warn(net_dev
, "Can't clear uc filters\n");
2005 dpaa2_eth_add_uc_hw_addr(net_dev
, priv
);
2007 /* Finally, clear uc promisc and set mc promisc as requested. */
2008 err
= dpni_set_unicast_promisc(mc_io
, 0, mc_token
, 0);
2010 netdev_warn(net_dev
, "Can't clear uc promisc\n");
2011 goto force_mc_promisc
;
2014 /* Neither unicast, nor multicast promisc will be on... eventually.
2015 * For now, rebuild mac filtering tables while forcing both of them on.
2017 err
= dpni_set_unicast_promisc(mc_io
, 0, mc_token
, 1);
2019 netdev_warn(net_dev
, "Can't set uc promisc (%d)\n", err
);
2020 err
= dpni_set_multicast_promisc(mc_io
, 0, mc_token
, 1);
2022 netdev_warn(net_dev
, "Can't set mc promisc (%d)\n", err
);
2024 /* Actual mac filtering tables reconstruction */
2025 err
= dpni_clear_mac_filters(mc_io
, 0, mc_token
, 1, 1);
2027 netdev_warn(net_dev
, "Can't clear mac filters\n");
2028 dpaa2_eth_add_mc_hw_addr(net_dev
, priv
);
2029 dpaa2_eth_add_uc_hw_addr(net_dev
, priv
);
2031 /* Now we can clear both ucast and mcast promisc, without risking
2032 * to drop legitimate frames anymore.
2034 err
= dpni_set_unicast_promisc(mc_io
, 0, mc_token
, 0);
2036 netdev_warn(net_dev
, "Can't clear ucast promisc\n");
2037 err
= dpni_set_multicast_promisc(mc_io
, 0, mc_token
, 0);
2039 netdev_warn(net_dev
, "Can't clear mcast promisc\n");
2044 err
= dpni_set_unicast_promisc(mc_io
, 0, mc_token
, 1);
2046 netdev_warn(net_dev
, "Can't set ucast promisc\n");
2048 err
= dpni_set_multicast_promisc(mc_io
, 0, mc_token
, 1);
2050 netdev_warn(net_dev
, "Can't set mcast promisc\n");
2053 static int dpaa2_eth_set_features(struct net_device
*net_dev
,
2054 netdev_features_t features
)
2056 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
2057 netdev_features_t changed
= features
^ net_dev
->features
;
2061 if (changed
& NETIF_F_RXCSUM
) {
2062 enable
= !!(features
& NETIF_F_RXCSUM
);
2063 err
= dpaa2_eth_set_rx_csum(priv
, enable
);
2068 if (changed
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)) {
2069 enable
= !!(features
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
));
2070 err
= dpaa2_eth_set_tx_csum(priv
, enable
);
2078 static int dpaa2_eth_ts_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
2080 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2081 struct hwtstamp_config config
;
2086 if (copy_from_user(&config
, rq
->ifr_data
, sizeof(config
)))
2089 switch (config
.tx_type
) {
2090 case HWTSTAMP_TX_OFF
:
2091 case HWTSTAMP_TX_ON
:
2092 case HWTSTAMP_TX_ONESTEP_SYNC
:
2093 priv
->tx_tstamp_type
= config
.tx_type
;
2099 if (config
.rx_filter
== HWTSTAMP_FILTER_NONE
) {
2100 priv
->rx_tstamp
= false;
2102 priv
->rx_tstamp
= true;
2103 /* TS is set for all frame types, not only those requested */
2104 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
2107 return copy_to_user(rq
->ifr_data
, &config
, sizeof(config
)) ?
2111 static int dpaa2_eth_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
2113 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2115 if (cmd
== SIOCSHWTSTAMP
)
2116 return dpaa2_eth_ts_ioctl(dev
, rq
, cmd
);
2119 return phylink_mii_ioctl(priv
->mac
->phylink
, rq
, cmd
);
2124 static bool xdp_mtu_valid(struct dpaa2_eth_priv
*priv
, int mtu
)
2126 int mfl
, linear_mfl
;
2128 mfl
= DPAA2_ETH_L2_MAX_FRM(mtu
);
2129 linear_mfl
= priv
->rx_buf_size
- DPAA2_ETH_RX_HWA_SIZE
-
2130 dpaa2_eth_rx_head_room(priv
) - XDP_PACKET_HEADROOM
;
2132 if (mfl
> linear_mfl
) {
2133 netdev_warn(priv
->net_dev
, "Maximum MTU for XDP is %d\n",
2134 linear_mfl
- VLAN_ETH_HLEN
);
2141 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv
*priv
, int mtu
, bool has_xdp
)
2145 /* We enforce a maximum Rx frame length based on MTU only if we have
2146 * an XDP program attached (in order to avoid Rx S/G frames).
2147 * Otherwise, we accept all incoming frames as long as they are not
2148 * larger than maximum size supported in hardware
2151 mfl
= DPAA2_ETH_L2_MAX_FRM(mtu
);
2153 mfl
= DPAA2_ETH_MFL
;
2155 err
= dpni_set_max_frame_length(priv
->mc_io
, 0, priv
->mc_token
, mfl
);
2157 netdev_err(priv
->net_dev
, "dpni_set_max_frame_length failed\n");
2164 static int dpaa2_eth_change_mtu(struct net_device
*dev
, int new_mtu
)
2166 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2169 if (!priv
->xdp_prog
)
2172 if (!xdp_mtu_valid(priv
, new_mtu
))
2175 err
= dpaa2_eth_set_rx_mfl(priv
, new_mtu
, true);
2184 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv
*priv
, bool has_xdp
)
2186 struct dpni_buffer_layout buf_layout
= {0};
2189 err
= dpni_get_buffer_layout(priv
->mc_io
, 0, priv
->mc_token
,
2190 DPNI_QUEUE_RX
, &buf_layout
);
2192 netdev_err(priv
->net_dev
, "dpni_get_buffer_layout failed\n");
2196 /* Reserve extra headroom for XDP header size changes */
2197 buf_layout
.data_head_room
= dpaa2_eth_rx_head_room(priv
) +
2198 (has_xdp
? XDP_PACKET_HEADROOM
: 0);
2199 buf_layout
.options
= DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM
;
2200 err
= dpni_set_buffer_layout(priv
->mc_io
, 0, priv
->mc_token
,
2201 DPNI_QUEUE_RX
, &buf_layout
);
2203 netdev_err(priv
->net_dev
, "dpni_set_buffer_layout failed\n");
2210 static int dpaa2_eth_setup_xdp(struct net_device
*dev
, struct bpf_prog
*prog
)
2212 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2213 struct dpaa2_eth_channel
*ch
;
2214 struct bpf_prog
*old
;
2215 bool up
, need_update
;
2218 if (prog
&& !xdp_mtu_valid(priv
, dev
->mtu
))
2222 bpf_prog_add(prog
, priv
->num_channels
);
2224 up
= netif_running(dev
);
2225 need_update
= (!!priv
->xdp_prog
!= !!prog
);
2228 dpaa2_eth_stop(dev
);
	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
2236 err
= dpaa2_eth_set_rx_mfl(priv
, dev
->mtu
, !!prog
);
2239 err
= dpaa2_eth_update_rx_buffer_headroom(priv
, !!prog
);
2244 old
= xchg(&priv
->xdp_prog
, prog
);
2248 for (i
= 0; i
< priv
->num_channels
; i
++) {
2249 ch
= priv
->channel
[i
];
2250 old
= xchg(&ch
->xdp
.prog
, prog
);
2256 err
= dpaa2_eth_open(dev
);
2265 bpf_prog_sub(prog
, priv
->num_channels
);
2267 dpaa2_eth_open(dev
);
static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return dpaa2_eth_setup_xdp(dev, xdp->prog);
	default:
		return -EINVAL;
	}

	return 0;
}
static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
				   struct xdp_frame *xdpf,
				   struct dpaa2_fd *fd)
{
	struct device *dev = net_dev->dev.parent;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	/* Align FD address, if possible */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* fill in necessary fields here */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(fd, xdpf->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}
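/* Sketch of the resulting Tx FD layout (assumed values, for illustration):
 * buffer_start is pulled back from xdpf->data by needed_headroom and, when
 * the frame's own headroom allows it, further down to the previous
 * DPAA2_ETH_TX_BUF_ALIGN boundary; the FD offset then records the distance
 * from buffer_start back up to xdpf->data.
 */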
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int enqueued, i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	fq = &priv->fq[smp_processor_id()];
	xdp_redirect_fds = &fq->xdp_redirect_fds;
	fds = xdp_redirect_fds->fds;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* create a FD for each xdp_frame in the list received */
	for (i = 0; i < n; i++) {
		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
		if (err)
			break;
	}
	xdp_redirect_fds->num = i;

	/* enqueue all the frame descriptors */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	for (i = 0; i < enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
	for (i = enqueued; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);

	return enqueued;
}
static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}
static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
				  struct tc_mqprio_qopt *mqprio)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 num_tc, num_queues;
	int i;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	update_xps(priv);

	return 0;
}
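/* Usage example (assumed interface name): `tc qdisc add dev eth0 root mqprio
 * num_tc 2 map 0 0 0 0 1 1 1 1 hw 1` requests two hardware-offloaded traffic
 * classes, which lands here as mqprio->num_tc = 2 and doubles the number of
 * real Tx queues exposed to the stack.
 */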
#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
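/* Example: a rate of 125,000,000 bytes/s (1 Gbit/s) becomes
 * div_u64(125000000, 1000000) * 8 = 1000 Mbit/s.
 */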
static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
{
	struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
	struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
	int err;

	if (p->command == TC_TBF_STATS)
		return -EOPNOTSUPP;

	/* Only per port Tx shaping */
	if (p->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE) {
		if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
			netdev_err(net_dev, "burst size cannot be greater than %d\n",
				   DPAA2_ETH_MAX_BURST_SIZE);
			return -EINVAL;
		}

		tx_cr_shaper.max_burst_size = cfg->max_size;
		/* The TBF interface is in bytes/s, whereas DPAA2 expects the
		 * rate in Mbit/s
		 */
		tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
	}

	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
				  &tx_er_shaper, 0);
	if (err) {
		netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
		return err;
	}

	return 0;
}
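/* Usage example (assumed interface name): `tc qdisc add dev eth0 root tbf
 * rate 1gbit burst 4k latency 1ms` reaches this handler as TC_TBF_REPLACE
 * with rate.rate_bytes_ps = 125000000 and max_size = 4096.
 */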
static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return dpaa2_eth_setup_mqprio(net_dev, type_data);
	case TC_SETUP_QDISC_TBF:
		return dpaa2_eth_setup_tbf(net_dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_do_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
	.ndo_setup_tc = dpaa2_eth_setup_tc,
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}
static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
				 struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}
static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = dpaa2_eth_setup_dpcon(priv);
	if (IS_ERR(channel->dpcon)) {
		err = PTR_ERR(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}
static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *channel)
{
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
	kfree(channel);
}
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = dpaa2_eth_alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->cb = dpaa2_eth_cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			dpaa2_eth_free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}
static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		dpaa2_eth_free_channel(priv, ch);
	}
}
static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
							      int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}
static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
		case DPAA2_RX_ERR_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}
static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i, j;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
			priv->fq[priv->num_fqs].tc = (u8)j;
			priv->fq[priv->num_fqs++].flowid = (u16)i;
		}
	}

	/* We have exactly one Rx error queue per DPNI */
	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;

	/* For each FQ, decide on which core to process incoming frames */
	dpaa2_eth_set_fq_affinity(priv);
}
/* Allocate and configure one buffer pool for each interface */
static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}
static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
{
	dpaa2_eth_drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}
static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	u16 rx_buf_align;
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1.
	 * We need to check for both alternatives in this situation.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* We need to ensure that the buffer size seen by WRIOP is a multiple
	 * of 64 or 256 bytes depending on the WRIOP version.
	 */
	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.pass_frame_status = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}
#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
#define DPNI_ENQUEUE_FQID_VER_MINOR	9

static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio,
				       u32 num_frames __always_unused,
				       int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
					  priv->tx_qdid, prio,
					  fq->tx_qdbin, fd);
	if (!err && frames_enqueued)
		*frames_enqueued = 1;
	return err;
}

static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_fq *fq,
						struct dpaa2_fd *fd,
						u8 prio, u32 num_frames,
						int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
						   fq->tx_fqid[prio],
						   fd, num_frames);
	if (err == 0)
		return -EBUSY;

	if (frames_enqueued)
		*frames_enqueued = err;
	return 0;
}
static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		priv->enqueue = dpaa2_eth_enqueue_qd;
	else
		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_link_cfg link_cfg = {0};
	int err;

	/* Get the default link options so we don't override other flags */
	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_get_link_cfg() failed\n");
		return err;
	}

	/* By default, enable both Rx and Tx pause frames */
	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = link_cfg.options;

	return 0;
}
static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{
	struct dpni_queue_id qid = {0};
	struct dpaa2_eth_fq *fq;
	struct dpni_queue queue;
	int err, i, j;

	/* We only use Tx FQIDs for FQID-based enqueue, so check
	 * if DPNI version supports it before updating FQIDs
	 */
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		return;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_TX_CONF_FQ)
			continue;
		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
					     DPNI_QUEUE_TX, j, fq->flowid,
					     &queue, &qid);
			if (err)
				goto out_err;

			fq->tx_fqid[j] = qid.fqid;
			if (fq->tx_fqid[j] == 0)
				goto out_err;
		}
	}

	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;

	return;

out_err:
	netdev_info(priv->net_dev,
		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
	priv->enqueue = dpaa2_eth_enqueue_qd;
}
/* Configure ingress classification based on VLAN PCP */
static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpkg_profile_cfg kg_cfg = {0};
	struct dpni_qos_tbl_cfg qos_cfg = {0};
	struct dpni_rule_cfg key_params;
	void *dma_mem, *key, *mask;
	u8 key_size = 2;	/* VLAN TCI field */
	int i, pcp, err;

	/* VLAN-based classification only makes sense if we have multiple
	 * traffic classes.
	 * Also, we need to extract just the 3-bit PCP field from the VLAN
	 * header and we can only do that by using a mask
	 */
	if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
		dev_dbg(dev, "VLAN-based QoS classification not supported\n");
		return -EOPNOTSUPP;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	kg_cfg.num_extracts = 1;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;

	err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg failed\n");
		goto out_free_tbl;
	}

	/* set QoS table */
	qos_cfg.default_tc = 0;
	qos_cfg.discard_on_miss = 0;
	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
					      DPAA2_CLASSIFIER_DMA_SIZE,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
		dev_err(dev, "QoS table DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_tbl;
	}

	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
	if (err) {
		dev_err(dev, "dpni_set_qos_table failed\n");
		goto out_unmap_tbl;
	}

	/* Add QoS table entries */
	key = kzalloc(key_size * 2, GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto out_unmap_tbl;
	}
	mask = key + key_size;
	*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);

	key_params.key_iova = dma_map_single(dev, key, key_size * 2,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_params.key_iova)) {
		dev_err(dev, "Qos table entry DMA mapping failed\n");
		err = -ENOMEM;
		goto out_free_key;
	}

	key_params.mask_iova = key_params.key_iova + key_size;
	key_params.key_size = key_size;

	/* We add rules for PCP-based distribution starting with highest
	 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
	 * classes to accommodate all priority levels, the lowest ones end up
	 * on TC 0 which was configured as default
	 */
	for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
		*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
		dma_sync_single_for_device(dev, key_params.key_iova,
					   key_size * 2, DMA_TO_DEVICE);

		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
					 &key_params, i, i);
		if (err) {
			dev_err(dev, "dpni_add_qos_entry failed\n");
			dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
			goto out_unmap_key;
		}
	}

	priv->vlan_cls_enabled = true;

	/* Table and key memory is not persistent, clean everything up after
	 * configuration is finished
	 */
out_unmap_key:
	dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
out_free_key:
	kfree(key);
out_unmap_tbl:
	dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
out_free_tbl:
	kfree(dma_mem);

	return err;
}
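/* Example mapping produced by the loop above, assuming 4 traffic classes:
 * VLAN PCP 7 -> TC 3, 6 -> TC 2, 5 -> TC 1, 4 -> TC 0; PCP 3..0 get no
 * dedicated entry and fall back to the default TC 0.
 */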
/* Configure the DPNI object this interface is associated with */
static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		return err;
	}

	/* Check if we can work with this DPNI object */
	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
				   &priv->dpni_ver_minor);
	if (err) {
		dev_err(dev, "dpni_get_api_version() failed\n");
		goto close;
	}
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
			priv->dpni_ver_major, priv->dpni_ver_minor,
			DPNI_VER_MAJOR, DPNI_VER_MINOR);
		err = -EOPNOTSUPP;
		goto close;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto close;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto close;
	}

	err = dpaa2_eth_set_buffer_layout(priv);
	if (err)
		goto close;

	dpaa2_eth_set_enqueue_mode(priv);

	/* Enable pause frame support */
	if (dpaa2_eth_has_pause_support(priv)) {
		err = dpaa2_eth_set_pause(priv);
		if (err)
			goto close;
	}

	err = dpaa2_eth_set_vlan_qos(priv);
	if (err && err != -EOPNOTSUPP)
		goto close;

	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
				       sizeof(struct dpaa2_eth_cls_rule),
				       GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}

	return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}
static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}
static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	/* xdp_rxq setup, only once for each channel */
	if (fq->tc > 0)
		return 0;

	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
			       fq->flowid);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg failed\n");
		return err;
	}

	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int i, err;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, i, fq->flowid,
				     &queue, &qid);
		if (err) {
			dev_err(dev, "dpni_get_queue(TX) failed\n");
			return err;
		}
		fq->tx_fqid[i] = qid.fqid;
	}

	/* All Tx queues belonging to the same flowid have the same qdbin */
	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}
static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue q = { { 0 } };
	struct dpni_queue_id qid;
	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
		return err;
	}

	fq->fqid = qid.fqid;

	q.destination.id = fq->channel->dpcon_id;
	q.destination.type = DPNI_DEST_DPCON;
	q.destination.priority = 1;
	q.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
	if (err) {
		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
		return err;
	}

	return 0;
}
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};
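/* Usage example (assumed interface name): `ethtool -N eth0 rx-flow-hash udp4
 * sdfn` requests RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which
 * selects the IP address and UDP port entries above for the Rx hash key.
 */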
/* Configure the Rx hash key using the legacy API */
static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
					  i, &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
			break;
		}
	}

	return err;
}
/* Configure the Rx hash key using the new API */
static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
					    &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}
/* Configure the Rx flow classification key */
static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
					  &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}
/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}
/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");

	return 0;
}
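/* Illustrative offsets only (they depend on the dist_fields sizes above): a
 * field placed after the two 6-byte MAC addresses and the 2-byte ethertype
 * would start at offset 6 + 6 + 2 = 14 in the classification key.
 */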
/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}
/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
		else
			err = dpaa2_eth_config_hash_key(priv, key_iova);
	} else {
		err = dpaa2_eth_config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}
static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_RX_ERR_FQ:
			err = setup_rx_err_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}
/* Allocate rings for storing incoming frame descriptors */
static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}
static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}
static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = dpaa2_eth_set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX | NETIF_F_HW_TC;
	net_dev->hw_features = net_dev->features;

	return 0;
}
static int dpaa2_eth_poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = dpaa2_eth_link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 status = ~0;
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
	}

	return IRQ_HANDLED;
}
static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}
static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
	priv->rx_tstamp = false;

	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);

	skb_queue_head_init(&priv->tx_skbs);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_dl_register(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_dl_port_del(priv);
err_dl_port_add:
	dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
	dpaa2_eth_dl_unregister(priv);
err_dl_register:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	dpaa2_eth_del_ch_napi(priv);
err_bind:
	dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
	dpaa2_eth_free_dpio(priv);
err_dpio_setup:
	dpaa2_eth_free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	destroy_workqueue(priv->dpaa2_ptp_wq);
err_wq_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}
static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif

	dpaa2_eth_disconnect_mac(priv);

	unregister_netdev(net_dev);

	dpaa2_eth_dl_port_del(priv);
	dpaa2_eth_dl_traps_unregister(priv);
	dpaa2_eth_dl_unregister(priv);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	dpaa2_eth_free_rings(priv);
	free_percpu(priv->sgt_cache);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	dpaa2_eth_del_ch_napi(priv);
	dpaa2_eth_free_dpbp(priv);
	dpaa2_eth_free_dpio(priv);
	dpaa2_eth_free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}
static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);