drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
1 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
2 * Copyright 2020 NXP
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 * * Redistributions of source code must retain the above copyright
7 * notice, this list of conditions and the following disclaimer.
8 * * Redistributions in binary form must reproduce the above copyright
9 * notice, this list of conditions and the following disclaimer in the
10 * documentation and/or other materials provided with the distribution.
11 * * Neither the name of Freescale Semiconductor nor the
12 * names of its contributors may be used to endorse or promote products
13 * derived from this software without specific prior written permission.
15 * ALTERNATIVELY, this software may be distributed under the terms of the
16 * GNU General Public License ("GPL") as published by the Free Software
17 * Foundation, either version 2 of that License or (at your option) any
18 * later version.
20 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
21 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
22 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
24 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
27 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #include <linux/init.h>
35 #include <linux/module.h>
36 #include <linux/of_platform.h>
37 #include <linux/of_mdio.h>
38 #include <linux/of_net.h>
39 #include <linux/io.h>
40 #include <linux/if_arp.h>
41 #include <linux/if_vlan.h>
42 #include <linux/icmp.h>
43 #include <linux/ip.h>
44 #include <linux/ipv6.h>
45 #include <linux/udp.h>
46 #include <linux/tcp.h>
47 #include <linux/net.h>
48 #include <linux/skbuff.h>
49 #include <linux/etherdevice.h>
50 #include <linux/if_ether.h>
51 #include <linux/highmem.h>
52 #include <linux/percpu.h>
53 #include <linux/dma-mapping.h>
54 #include <linux/sort.h>
55 #include <linux/phy_fixed.h>
56 #include <linux/bpf.h>
57 #include <linux/bpf_trace.h>
58 #include <soc/fsl/bman.h>
59 #include <soc/fsl/qman.h>
60 #include "fman.h"
61 #include "fman_port.h"
62 #include "mac.h"
63 #include "dpaa_eth.h"
65 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
66 * using trace events only need to #include <trace/events/sched.h>
68 #define CREATE_TRACE_POINTS
69 #include "dpaa_eth_trace.h"
71 static int debug = -1;
72 module_param(debug, int, 0444);
73 MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");
75 static u16 tx_timeout = 1000;
76 module_param(tx_timeout, ushort, 0444);
77 MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
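/* FD status bits reported by FMan that the driver counts as Rx or Tx errors */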
79 #define FM_FD_STAT_RX_ERRORS \
80 (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
81 FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
82 FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
83 FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
84 FM_FD_ERR_PRS_HDR_ERR)
86 #define FM_FD_STAT_TX_ERRORS \
87 (FM_FD_ERR_UNSUPPORTED_FORMAT | \
88 FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)
90 #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
91 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
92 NETIF_MSG_IFDOWN | NETIF_MSG_HW)
94 #define DPAA_INGRESS_CS_THRESHOLD 0x10000000
95 /* Ingress congestion threshold on FMan ports
96 * The size in bytes of the ingress tail-drop threshold on FMan ports.
97 * Traffic piling up above this value will be rejected by QMan and discarded
98 * by FMan.
101 /* Size in bytes of the FQ taildrop threshold */
102 #define DPAA_FQ_TD 0x200000
104 #define DPAA_CS_THRESHOLD_1G 0x06000000
105 /* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
106 * The size in bytes of the egress Congestion State notification threshold on
107 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
108 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
109 * and the larger the frame size, the more acute the problem.
110 * So we have to find a balance between these factors:
111 * - avoiding the device staying congested for a prolonged time (risking
112 * the netdev watchdog to fire - see also the tx_timeout module param);
113 * - affecting performance of protocols such as TCP, which otherwise
114 * behave well under the congestion notification mechanism;
115 * - preventing the Tx cores from tightly-looping (as if the congestion
116 * threshold was too low to be effective);
117 * - running out of memory if the CS threshold is set too high.
120 #define DPAA_CS_THRESHOLD_10G 0x10000000
121 /* The size in bytes of the egress Congestion State notification threshold on
122 * 10G ports, range 0x1000 .. 0x10000000
125 /* Largest value that the FQD's OAL field can hold */
126 #define FSL_QMAN_MAX_OAL 127
128 /* Default alignment for start of data in an Rx FD */
129 #ifdef CONFIG_DPAA_ERRATUM_A050385
130 /* aligning data start to 64 avoids DMA transaction splits, unless the buffer
131 * is crossing a 4k page boundary
133 #define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16)
134 /* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
135 * crossings; also, all SG fragments except the last must have a size multiple
136 * of 256 to avoid DMA transaction splits
138 #define DPAA_A050385_ALIGN 256
139 #define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
140 DPAA_A050385_ALIGN : 16)
141 #else
142 #define DPAA_FD_DATA_ALIGNMENT 16
143 #define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
144 #endif
146 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
147 #define DPAA_SGT_SIZE 256
149 /* Values for the L3R field of the FM Parse Results
151 /* L3 Type field: First IP Present IPv4 */
152 #define FM_L3_PARSE_RESULT_IPV4 0x8000
153 /* L3 Type field: First IP Present IPv6 */
154 #define FM_L3_PARSE_RESULT_IPV6 0x4000
155 /* Values for the L4R field of the FM Parse Results */
156 /* L4 Type field: UDP */
157 #define FM_L4_PARSE_RESULT_UDP 0x40
158 /* L4 Type field: TCP */
159 #define FM_L4_PARSE_RESULT_TCP 0x20
161 /* FD status field indicating whether the FM Parser has attempted to validate
162 * the L4 csum of the frame.
163 * Note that having this bit set doesn't necessarily imply that the checksum
164 * is valid. One would have to check the parse results to find that out.
166 #define FM_FD_STAT_L4CV 0x00000004
168 #define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
169 #define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */
171 #define FSL_DPAA_BPID_INV 0xff
172 #define FSL_DPAA_ETH_MAX_BUF_COUNT 128
173 #define FSL_DPAA_ETH_REFILL_THRESHOLD 80
175 #define DPAA_TX_PRIV_DATA_SIZE 16
176 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
177 #define DPAA_TIME_STAMP_SIZE 8
178 #define DPAA_HASH_RESULTS_SIZE 8
179 #define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
180 + DPAA_HASH_RESULTS_SIZE)
181 #define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
182 XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
183 #ifdef CONFIG_DPAA_ERRATUM_A050385
184 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
185 #define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
186 DPAA_RX_PRIV_DATA_A050385_SIZE : \
187 DPAA_RX_PRIV_DATA_DEFAULT_SIZE)
188 #else
189 #define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE
190 #endif
192 #define DPAA_ETH_PCD_RXQ_NUM 128
194 #define DPAA_ENQUEUE_RETRIES 100000
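/* Rx and Tx indices into mac_dev->port[] */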
196 enum port_type {RX, TX};
198 struct fm_port_fqs {
199 struct dpaa_fq *tx_defq;
200 struct dpaa_fq *tx_errq;
201 struct dpaa_fq *rx_defq;
202 struct dpaa_fq *rx_errq;
203 struct dpaa_fq *rx_pcdq;
206 /* All the dpa bps in use at any moment */
207 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
209 #define DPAA_BP_RAW_SIZE 4096
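/* Each Rx buffer starts out as a full page; dpaa_bp_size() subtracts the
 * skb_shared_info overhead (and, under errata A050385, rounds the usable
 * size down to a 256-byte multiple).
 */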
211 #ifdef CONFIG_DPAA_ERRATUM_A050385
212 #define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
213 ~(DPAA_A050385_ALIGN - 1))
214 #else
215 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
216 #endif
218 static int dpaa_max_frm;
220 static int dpaa_rx_extra_headroom;
222 #define dpaa_get_max_mtu() \
223 (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
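/* Worked example: assuming the usual FMan default of a 1522-byte maximum
 * frame length, the largest MTU is 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN) =
 * 1522 - (18 + 4) = 1500.
 */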
225 static int dpaa_netdev_init(struct net_device *net_dev,
226 const struct net_device_ops *dpaa_ops,
227 u16 tx_timeout)
229 struct dpaa_priv *priv = netdev_priv(net_dev);
230 struct device *dev = net_dev->dev.parent;
231 struct dpaa_percpu_priv *percpu_priv;
232 const u8 *mac_addr;
233 int i, err;
235 /* Although we access another CPU's private data here
236 * we do it at initialization so it is safe
238 for_each_possible_cpu(i) {
239 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
240 percpu_priv->net_dev = net_dev;
243 net_dev->netdev_ops = dpaa_ops;
244 mac_addr = priv->mac_dev->addr;
246 net_dev->mem_start = priv->mac_dev->res->start;
247 net_dev->mem_end = priv->mac_dev->res->end;
249 net_dev->min_mtu = ETH_MIN_MTU;
250 net_dev->max_mtu = dpaa_get_max_mtu();
252 net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
253 NETIF_F_LLTX | NETIF_F_RXHASH);
255 net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
256 /* The kernel enables GSO automatically if we declare NETIF_F_SG.
257 * For conformity, we'll still declare GSO explicitly.
259 net_dev->features |= NETIF_F_GSO;
260 net_dev->features |= NETIF_F_RXCSUM;
262 net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
263 /* we do not want shared skbs on TX */
264 net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
266 net_dev->features |= net_dev->hw_features;
267 net_dev->vlan_features = net_dev->features;
269 if (is_valid_ether_addr(mac_addr)) {
270 memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
271 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
272 } else {
273 eth_hw_addr_random(net_dev);
274 err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
275 (enet_addr_t *)net_dev->dev_addr);
276 if (err) {
277 dev_err(dev, "Failed to set random MAC address\n");
278 return -EINVAL;
280 dev_info(dev, "Using random MAC address: %pM\n",
281 net_dev->dev_addr);
284 net_dev->ethtool_ops = &dpaa_ethtool_ops;
286 net_dev->needed_headroom = priv->tx_headroom;
287 net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
289 /* start without the RUNNING flag, phylib controls it later */
290 netif_carrier_off(net_dev);
292 err = register_netdev(net_dev);
293 if (err < 0) {
294 dev_err(dev, "register_netdev() = %d\n", err);
295 return err;
298 return 0;
301 static int dpaa_stop(struct net_device *net_dev)
303 struct mac_device *mac_dev;
304 struct dpaa_priv *priv;
305 int i, err, error;
307 priv = netdev_priv(net_dev);
308 mac_dev = priv->mac_dev;
310 netif_tx_stop_all_queues(net_dev);
311 /* Allow the Fman (Tx) port to process in-flight frames before we
312 * try switching it off.
314 msleep(200);
316 err = mac_dev->stop(mac_dev);
317 if (err < 0)
318 netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
319 err);
321 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
322 error = fman_port_disable(mac_dev->port[i]);
323 if (error)
324 err = error;
327 if (net_dev->phydev)
328 phy_disconnect(net_dev->phydev);
329 net_dev->phydev = NULL;
331 msleep(200);
333 return err;
336 static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
338 struct dpaa_percpu_priv *percpu_priv;
339 const struct dpaa_priv *priv;
341 priv = netdev_priv(net_dev);
342 percpu_priv = this_cpu_ptr(priv->percpu_priv);
344 netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
345 jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));
347 percpu_priv->stats.tx_errors++;
350 /* Calculates the statistics for the given device by adding the statistics
351 * collected by each CPU.
353 static void dpaa_get_stats64(struct net_device *net_dev,
354 struct rtnl_link_stats64 *s)
356 int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
357 struct dpaa_priv *priv = netdev_priv(net_dev);
358 struct dpaa_percpu_priv *percpu_priv;
359 u64 *netstats = (u64 *)s;
360 u64 *cpustats;
361 int i, j;
363 for_each_possible_cpu(i) {
364 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
366 cpustats = (u64 *)&percpu_priv->stats;
368 /* add stats from all CPUs */
369 for (j = 0; j < numstats; j++)
370 netstats[j] += cpustats[j];
374 static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
375 void *type_data)
377 struct dpaa_priv *priv = netdev_priv(net_dev);
378 struct tc_mqprio_qopt *mqprio = type_data;
379 u8 num_tc;
380 int i;
382 if (type != TC_SETUP_QDISC_MQPRIO)
383 return -EOPNOTSUPP;
385 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
386 num_tc = mqprio->num_tc;
388 if (num_tc == priv->num_tc)
389 return 0;
391 if (!num_tc) {
392 netdev_reset_tc(net_dev);
393 goto out;
396 if (num_tc > DPAA_TC_NUM) {
397 netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
398 DPAA_TC_NUM);
399 return -EINVAL;
402 netdev_set_num_tc(net_dev, num_tc);
404 for (i = 0; i < num_tc; i++)
405 netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
406 i * DPAA_TC_TXQ_NUM);
408 out:
409 priv->num_tc = num_tc ? : 1;
410 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
411 return 0;
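/* Retrieve the mac_device that was attached to this dpaa platform device
 * through its platform data.
 */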
414 static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
416 struct dpaa_eth_data *eth_data;
417 struct device *dpaa_dev;
418 struct mac_device *mac_dev;
420 dpaa_dev = &pdev->dev;
421 eth_data = dpaa_dev->platform_data;
422 if (!eth_data) {
423 dev_err(dpaa_dev, "eth_data missing\n");
424 return ERR_PTR(-ENODEV);
426 mac_dev = eth_data->mac_dev;
427 if (!mac_dev) {
428 dev_err(dpaa_dev, "mac_dev missing\n");
429 return ERR_PTR(-EINVAL);
432 return mac_dev;
435 static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
437 const struct dpaa_priv *priv;
438 struct mac_device *mac_dev;
439 struct sockaddr old_addr;
440 int err;
442 priv = netdev_priv(net_dev);
444 memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);
446 err = eth_mac_addr(net_dev, addr);
447 if (err < 0) {
448 netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
449 return err;
452 mac_dev = priv->mac_dev;
454 err = mac_dev->change_addr(mac_dev->fman_mac,
455 (enet_addr_t *)net_dev->dev_addr);
456 if (err < 0) {
457 netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
458 err);
459 /* reverting to previous address */
460 eth_mac_addr(net_dev, &old_addr);
462 return err;
465 return 0;
468 static void dpaa_set_rx_mode(struct net_device *net_dev)
470 const struct dpaa_priv *priv;
471 int err;
473 priv = netdev_priv(net_dev);
475 if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
476 priv->mac_dev->promisc = !priv->mac_dev->promisc;
477 err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
478 priv->mac_dev->promisc);
479 if (err < 0)
480 netif_err(priv, drv, net_dev,
481 "mac_dev->set_promisc() = %d\n",
482 err);
485 if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
486 priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
487 err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
488 priv->mac_dev->allmulti);
489 if (err < 0)
490 netif_err(priv, drv, net_dev,
491 "mac_dev->set_allmulti() = %d\n",
492 err);
495 err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
496 if (err < 0)
497 netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
498 err);
501 static struct dpaa_bp *dpaa_bpid2pool(int bpid)
503 if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
504 return NULL;
506 return dpaa_bp_array[bpid];
509 /* checks if this bpool is already allocated */
510 static bool dpaa_bpid2pool_use(int bpid)
512 if (dpaa_bpid2pool(bpid)) {
513 refcount_inc(&dpaa_bp_array[bpid]->refs);
514 return true;
517 return false;
520 /* called only once per bpid by dpaa_bp_alloc_pool() */
521 static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
523 dpaa_bp_array[bpid] = dpaa_bp;
524 refcount_set(&dpaa_bp->refs, 1);
527 static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
529 int err;
531 if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
532 pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
533 __func__);
534 return -EINVAL;
537 /* If the pool is already specified, we only create one per bpid */
538 if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
539 dpaa_bpid2pool_use(dpaa_bp->bpid))
540 return 0;
542 if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
543 dpaa_bp->pool = bman_new_pool();
544 if (!dpaa_bp->pool) {
545 pr_err("%s: bman_new_pool() failed\n",
546 __func__);
547 return -ENODEV;
550 dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
553 if (dpaa_bp->seed_cb) {
554 err = dpaa_bp->seed_cb(dpaa_bp);
555 if (err)
556 goto pool_seed_failed;
559 dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);
561 return 0;
563 pool_seed_failed:
564 pr_err("%s: pool seeding failed\n", __func__);
565 bman_free_pool(dpaa_bp->pool);
567 return err;
570 /* remove and free all the buffers from the given buffer pool */
571 static void dpaa_bp_drain(struct dpaa_bp *bp)
573 u8 num = 8;
574 int ret;
576 do {
577 struct bm_buffer bmb[8];
578 int i;
580 ret = bman_acquire(bp->pool, bmb, num);
581 if (ret < 0) {
582 if (num == 8) {
583 /* we have fewer than 8 buffers left;
584 * drain them one by one
586 num = 1;
587 ret = 1;
588 continue;
589 } else {
590 /* Pool is fully drained */
591 break;
595 if (bp->free_buf_cb)
596 for (i = 0; i < num; i++)
597 bp->free_buf_cb(bp, &bmb[i]);
598 } while (ret > 0);
601 static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
603 struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);
605 /* the mapping between bpid and dpaa_bp is done very late in the
606 * allocation procedure; if something failed before the mapping, the bp
607 * was not configured, therefore we don't need the below instructions
609 if (!bp)
610 return;
612 if (!refcount_dec_and_test(&bp->refs))
613 return;
615 if (bp->free_buf_cb)
616 dpaa_bp_drain(bp);
618 dpaa_bp_array[bp->bpid] = NULL;
619 bman_free_pool(bp->pool);
622 static void dpaa_bps_free(struct dpaa_priv *priv)
624 dpaa_bp_free(priv->dpaa_bp);
627 /* Use multiple WQs for FQ assignment:
628 * - Tx Confirmation queues go to WQ1.
629 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
630 * to be scheduled, in case there are many more FQs in WQ6).
631 * - Rx Default goes to WQ6.
632 * - Tx queues go to different WQs depending on their priority. Equal
633 * chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
634 * WQ0 (highest priority).
635 * This ensures that Tx-confirmed buffers are timely released. In particular,
636 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
637 * are greatly outnumbered by other FQs in the system, while
638 * dequeue scheduling is round-robin.
640 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
642 switch (fq->fq_type) {
643 case FQ_TYPE_TX_CONFIRM:
644 case FQ_TYPE_TX_CONF_MQ:
645 fq->wq = 1;
646 break;
647 case FQ_TYPE_RX_ERROR:
648 case FQ_TYPE_TX_ERROR:
649 fq->wq = 5;
650 break;
651 case FQ_TYPE_RX_DEFAULT:
652 case FQ_TYPE_RX_PCD:
653 fq->wq = 6;
654 break;
655 case FQ_TYPE_TX:
656 switch (idx / DPAA_TC_TXQ_NUM) {
657 case 0:
658 /* Low priority (best effort) */
659 fq->wq = 6;
660 break;
661 case 1:
662 /* Medium priority */
663 fq->wq = 2;
664 break;
665 case 2:
666 /* High priority */
667 fq->wq = 1;
668 break;
669 case 3:
670 /* Very high priority */
671 fq->wq = 0;
672 break;
673 default:
674 WARN(1, "Too many TX FQs: more than %d!\n",
675 DPAA_ETH_TXQ_NUM);
677 break;
678 default:
679 WARN(1, "Invalid FQ type %d for FQID %d!\n",
680 fq->fq_type, fq->fqid);
684 static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
685 u32 start, u32 count,
686 struct list_head *list,
687 enum dpaa_fq_type fq_type)
689 struct dpaa_fq *dpaa_fq;
690 int i;
692 dpaa_fq = devm_kcalloc(dev, count, sizeof(*dpaa_fq),
693 GFP_KERNEL);
694 if (!dpaa_fq)
695 return NULL;
697 for (i = 0; i < count; i++) {
698 dpaa_fq[i].fq_type = fq_type;
699 dpaa_fq[i].fqid = start ? start + i : 0;
700 list_add_tail(&dpaa_fq[i].list, list);
703 for (i = 0; i < count; i++)
704 dpaa_assign_wq(dpaa_fq + i, i);
706 return dpaa_fq;
709 static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
710 struct fm_port_fqs *port_fqs)
712 struct dpaa_fq *dpaa_fq;
713 u32 fq_base, fq_base_aligned, i;
715 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
716 if (!dpaa_fq)
717 goto fq_alloc_failed;
719 port_fqs->rx_errq = &dpaa_fq[0];
721 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
722 if (!dpaa_fq)
723 goto fq_alloc_failed;
725 port_fqs->rx_defq = &dpaa_fq[0];
727 /* the PCD FQID range needs to be aligned for correct operation */
728 if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
729 goto fq_alloc_failed;
731 fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);
733 for (i = fq_base; i < fq_base_aligned; i++)
734 qman_release_fqid(i);
736 for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
737 i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
738 qman_release_fqid(i);
740 dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
741 list, FQ_TYPE_RX_PCD);
742 if (!dpaa_fq)
743 goto fq_alloc_failed;
745 port_fqs->rx_pcdq = &dpaa_fq[0];
747 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
748 goto fq_alloc_failed;
750 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
751 if (!dpaa_fq)
752 goto fq_alloc_failed;
754 port_fqs->tx_errq = &dpaa_fq[0];
756 dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
757 if (!dpaa_fq)
758 goto fq_alloc_failed;
760 port_fqs->tx_defq = &dpaa_fq[0];
762 if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
763 goto fq_alloc_failed;
765 return 0;
767 fq_alloc_failed:
768 dev_err(dev, "dpaa_fq_alloc() failed\n");
769 return -ENOMEM;
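/* A single QMan pool channel, lazily allocated below and shared by the Rx
 * FQs of all DPAA interfaces handled by this driver.
 */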
772 static u32 rx_pool_channel;
773 static DEFINE_SPINLOCK(rx_pool_channel_init);
775 static int dpaa_get_channel(void)
777 spin_lock(&rx_pool_channel_init);
778 if (!rx_pool_channel) {
779 u32 pool;
780 int ret;
782 ret = qman_alloc_pool(&pool);
784 if (!ret)
785 rx_pool_channel = pool;
787 spin_unlock(&rx_pool_channel_init);
788 if (!rx_pool_channel)
789 return -ENOMEM;
790 return rx_pool_channel;
793 static void dpaa_release_channel(void)
795 qman_release_pool(rx_pool_channel);
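/* Add the pool channel to the static dequeue command of every online CPU's
 * affine portal, so any of them can dequeue the Rx FQs placed on it.
 */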
798 static void dpaa_eth_add_channel(u16 channel, struct device *dev)
800 u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
801 const cpumask_t *cpus = qman_affine_cpus();
802 struct qman_portal *portal;
803 int cpu;
805 for_each_cpu_and(cpu, cpus, cpu_online_mask) {
806 portal = qman_get_affine_portal(cpu);
807 qman_p_static_dequeue_add(portal, pool);
808 qman_start_using_portal(portal, dev);
812 /* Congestion group state change notification callback.
813 * Stops the device's egress queues while they are congested and
814 * wakes them upon exiting congested state.
815 * Also updates some CGR-related stats.
817 static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
818 int congested)
820 struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
821 struct dpaa_priv, cgr_data.cgr);
823 if (congested) {
824 priv->cgr_data.congestion_start_jiffies = jiffies;
825 netif_tx_stop_all_queues(priv->net_dev);
826 priv->cgr_data.cgr_congested_count++;
827 } else {
828 priv->cgr_data.congested_jiffies +=
829 (jiffies - priv->cgr_data.congestion_start_jiffies);
830 netif_tx_wake_all_queues(priv->net_dev);
834 static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
836 struct qm_mcc_initcgr initcgr;
837 u32 cs_th;
838 int err;
840 err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
841 if (err < 0) {
842 if (netif_msg_drv(priv))
843 pr_err("%s: Error %d allocating CGR ID\n",
844 __func__, err);
845 goto out_error;
847 priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
849 /* Enable Congestion State Change Notifications and CS taildrop */
850 memset(&initcgr, 0, sizeof(initcgr));
851 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
852 initcgr.cgr.cscn_en = QM_CGR_EN;
854 /* Set different thresholds based on the MAC speed.
855 * This may become suboptimal if the MAC is reconfigured at a speed
856 * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
857 * In such cases, we ought to reconfigure the threshold, too.
859 if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
860 cs_th = DPAA_CS_THRESHOLD_10G;
861 else
862 cs_th = DPAA_CS_THRESHOLD_1G;
863 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
865 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
866 initcgr.cgr.cstd_en = QM_CGR_EN;
868 err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
869 &initcgr);
870 if (err < 0) {
871 if (netif_msg_drv(priv))
872 pr_err("%s: Error %d creating CGR with ID %d\n",
873 __func__, err, priv->cgr_data.cgr.cgrid);
874 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
875 goto out_error;
877 if (netif_msg_drv(priv))
878 pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
879 priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
880 priv->cgr_data.cgr.chan);
882 out_error:
883 return err;
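/* Ingress FQs are dequeued through the software (pool) channel and are never
 * enqueued to by the driver; egress FQs target the FMan Tx port's dedicated
 * channel, or are left unmodified when no port is given.
 */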
886 static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
887 struct dpaa_fq *fq,
888 const struct qman_fq *template)
890 fq->fq_base = *template;
891 fq->net_dev = priv->net_dev;
893 fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
894 fq->channel = priv->channel;
897 static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
898 struct dpaa_fq *fq,
899 struct fman_port *port,
900 const struct qman_fq *template)
902 fq->fq_base = *template;
903 fq->net_dev = priv->net_dev;
905 if (port) {
906 fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
907 fq->channel = (u16)fman_port_get_qman_channel_id(port);
908 } else {
909 fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
913 static void dpaa_fq_setup(struct dpaa_priv *priv,
914 const struct dpaa_fq_cbs *fq_cbs,
915 struct fman_port *tx_port)
917 int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
918 const cpumask_t *affine_cpus = qman_affine_cpus();
919 u16 channels[NR_CPUS];
920 struct dpaa_fq *fq;
922 for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
923 channels[num_portals++] = qman_affine_channel(cpu);
925 if (num_portals == 0)
926 dev_err(priv->net_dev->dev.parent,
927 "No Qman software (affine) channels found\n");
929 /* Initialize each FQ in the list */
930 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
931 switch (fq->fq_type) {
932 case FQ_TYPE_RX_DEFAULT:
933 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
934 break;
935 case FQ_TYPE_RX_ERROR:
936 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
937 break;
938 case FQ_TYPE_RX_PCD:
939 if (!num_portals)
940 continue;
941 dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
942 fq->channel = channels[portal_cnt++ % num_portals];
943 break;
944 case FQ_TYPE_TX:
945 dpaa_setup_egress(priv, fq, tx_port,
946 &fq_cbs->egress_ern);
947 /* If we have more Tx queues than the number of cores,
948 * just ignore the extra ones.
950 if (egress_cnt < DPAA_ETH_TXQ_NUM)
951 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
952 break;
953 case FQ_TYPE_TX_CONF_MQ:
954 priv->conf_fqs[conf_cnt++] = &fq->fq_base;
955 fallthrough;
956 case FQ_TYPE_TX_CONFIRM:
957 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
958 break;
959 case FQ_TYPE_TX_ERROR:
960 dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
961 break;
962 default:
963 dev_warn(priv->net_dev->dev.parent,
964 "Unknown FQ type detected!\n");
965 break;
969 /* Make sure all CPUs receive a corresponding Tx queue. */
970 while (egress_cnt < DPAA_ETH_TXQ_NUM) {
971 list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
972 if (fq->fq_type != FQ_TYPE_TX)
973 continue;
974 priv->egress_fqs[egress_cnt++] = &fq->fq_base;
975 if (egress_cnt == DPAA_ETH_TXQ_NUM)
976 break;
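/* Reverse lookup: find the Tx queue index a given egress FQ was assigned to */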
981 static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
982 struct qman_fq *tx_fq)
984 int i;
986 for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
987 if (priv->egress_fqs[i] == tx_fq)
988 return i;
990 return -EINVAL;
993 static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
995 const struct dpaa_priv *priv;
996 struct qman_fq *confq = NULL;
997 struct qm_mcc_initfq initfq;
998 struct device *dev;
999 struct qman_fq *fq;
1000 int queue_id;
1001 int err;
1003 priv = netdev_priv(dpaa_fq->net_dev);
1004 dev = dpaa_fq->net_dev->dev.parent;
1006 if (dpaa_fq->fqid == 0)
1007 dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
1009 dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);
1011 err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
1012 if (err) {
1013 dev_err(dev, "qman_create_fq() failed\n");
1014 return err;
1016 fq = &dpaa_fq->fq_base;
1018 if (dpaa_fq->init) {
1019 memset(&initfq, 0, sizeof(initfq));
1021 initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
1022 /* Note: we may get to keep an empty FQ in cache */
1023 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);
1025 /* Try to reduce the number of portal interrupts for
1026 * Tx Confirmation FQs.
1028 if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
1029 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);
1031 /* FQ placement */
1032 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);
1034 qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
1036 /* Put all egress queues in a congestion group of their own.
1037 * Sensu stricto, the Tx confirmation queues are Rx FQs,
1038 * rather than Tx - but they nonetheless account for the
1039 * memory footprint on behalf of egress traffic. We therefore
1040 * place them in the netdev's CGR, along with the Tx FQs.
1042 if (dpaa_fq->fq_type == FQ_TYPE_TX ||
1043 dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
1044 dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
1045 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1046 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1047 initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
1048 /* Set a fixed overhead accounting, in an attempt to
1049 * reduce the impact of fixed-size skb shells and the
1050 * driver's needed headroom on system memory. This is
1051 * especially the case when the egress traffic is
1052 * composed of small datagrams.
1053 * Unfortunately, QMan's OAL value is capped to an
1054 * insufficient value, but even that is better than
1055 * no overhead accounting at all.
1057 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1058 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1059 qm_fqd_set_oal(&initfq.fqd,
1060 min(sizeof(struct sk_buff) +
1061 priv->tx_headroom,
1062 (size_t)FSL_QMAN_MAX_OAL));
1065 if (td_enable) {
1066 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
1067 qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
1068 initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
1071 if (dpaa_fq->fq_type == FQ_TYPE_TX) {
1072 queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
1073 if (queue_id >= 0)
1074 confq = priv->conf_fqs[queue_id];
1075 if (confq) {
1076 initfq.we_mask |=
1077 cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1078 /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
1079 * A2V=1 (contextA A2 field is valid)
1080 * A0V=1 (contextA A0 field is valid)
1081 * B0V=1 (contextB field is valid)
1082 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
1083 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
1085 qm_fqd_context_a_set64(&initfq.fqd,
1086 0x1e00000080000000ULL);
1090 /* Put all the ingress queues in our "ingress CGR". */
1091 if (priv->use_ingress_cgr &&
1092 (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1093 dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
1094 dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
1095 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
1096 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
1097 initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
1098 /* Set a fixed overhead accounting, just like for the
1099 * egress CGR.
1101 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
1102 qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
1103 qm_fqd_set_oal(&initfq.fqd,
1104 min(sizeof(struct sk_buff) +
1105 priv->tx_headroom,
1106 (size_t)FSL_QMAN_MAX_OAL));
1109 /* Initialization common to all ingress queues */
1110 if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
1111 initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
1112 initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
1113 QM_FQCTRL_CTXASTASHING);
1114 initfq.fqd.context_a.stashing.exclusive =
1115 QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
1116 QM_STASHING_EXCL_ANNOTATION;
1117 qm_fqd_set_stashing(&initfq.fqd, 1, 2,
1118 DIV_ROUND_UP(sizeof(struct qman_fq),
1119 64));
1122 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
1123 if (err < 0) {
1124 dev_err(dev, "qman_init_fq(%u) = %d\n",
1125 qman_fq_fqid(fq), err);
1126 qman_destroy_fq(fq);
1127 return err;
1131 dpaa_fq->fqid = qman_fq_fqid(fq);
1133 if (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1134 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) {
1135 err = xdp_rxq_info_reg(&dpaa_fq->xdp_rxq, dpaa_fq->net_dev,
1136 dpaa_fq->fqid, 0);
1137 if (err) {
1138 dev_err(dev, "xdp_rxq_info_reg() = %d\n", err);
1139 return err;
1142 err = xdp_rxq_info_reg_mem_model(&dpaa_fq->xdp_rxq,
1143 MEM_TYPE_PAGE_ORDER0, NULL);
1144 if (err) {
1145 dev_err(dev, "xdp_rxq_info_reg_mem_model() = %d\n",
1146 err);
1147 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1148 return err;
1152 return 0;
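/* Tear down a single FQ: retire it, take it out of service, unregister its
 * XDP rxq info if it had any, then destroy it and unlink it from the list.
 */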
1155 static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
1157 const struct dpaa_priv *priv;
1158 struct dpaa_fq *dpaa_fq;
1159 int err, error;
1161 err = 0;
1163 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
1164 priv = netdev_priv(dpaa_fq->net_dev);
1166 if (dpaa_fq->init) {
1167 err = qman_retire_fq(fq, NULL);
1168 if (err < 0 && netif_msg_drv(priv))
1169 dev_err(dev, "qman_retire_fq(%u) = %d\n",
1170 qman_fq_fqid(fq), err);
1172 error = qman_oos_fq(fq);
1173 if (error < 0 && netif_msg_drv(priv)) {
1174 dev_err(dev, "qman_oos_fq(%u) = %d\n",
1175 qman_fq_fqid(fq), error);
1176 if (err >= 0)
1177 err = error;
1181 if ((dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
1182 dpaa_fq->fq_type == FQ_TYPE_RX_PCD) &&
1183 xdp_rxq_info_is_reg(&dpaa_fq->xdp_rxq))
1184 xdp_rxq_info_unreg(&dpaa_fq->xdp_rxq);
1186 qman_destroy_fq(fq);
1187 list_del(&dpaa_fq->list);
1189 return err;
1192 static int dpaa_fq_free(struct device *dev, struct list_head *list)
1194 struct dpaa_fq *dpaa_fq, *tmp;
1195 int err, error;
1197 err = 0;
1198 list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
1199 error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
1200 if (error < 0 && err >= 0)
1201 err = error;
1204 return err;
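/* Configure and initialize an FMan Tx port: buffer prefix contents plus the
 * default and error FQIDs.
 */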
1207 static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
1208 struct dpaa_fq *defq,
1209 struct dpaa_buffer_layout *buf_layout)
1211 struct fman_buffer_prefix_content buf_prefix_content;
1212 struct fman_port_params params;
1213 int err;
1215 memset(&params, 0, sizeof(params));
1216 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1218 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1219 buf_prefix_content.pass_prs_result = true;
1220 buf_prefix_content.pass_hash_result = true;
1221 buf_prefix_content.pass_time_stamp = true;
1222 buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
1224 params.specific_params.non_rx_params.err_fqid = errq->fqid;
1225 params.specific_params.non_rx_params.dflt_fqid = defq->fqid;
1227 err = fman_port_config(port, &params);
1228 if (err) {
1229 pr_err("%s: fman_port_config failed\n", __func__);
1230 return err;
1233 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1234 if (err) {
1235 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1236 __func__);
1237 return err;
1240 err = fman_port_init(port);
1241 if (err)
1242 pr_err("%s: fm_port_init failed\n", __func__);
1244 return err;
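/* Configure and initialize an FMan Rx port: buffer prefix contents, the
 * default/error/PCD FQIDs and the single external buffer pool backing it.
 */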
1247 static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
1248 struct dpaa_fq *errq,
1249 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
1250 struct dpaa_buffer_layout *buf_layout)
1252 struct fman_buffer_prefix_content buf_prefix_content;
1253 struct fman_port_rx_params *rx_p;
1254 struct fman_port_params params;
1255 int err;
1257 memset(&params, 0, sizeof(params));
1258 memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
1260 buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
1261 buf_prefix_content.pass_prs_result = true;
1262 buf_prefix_content.pass_hash_result = true;
1263 buf_prefix_content.pass_time_stamp = true;
1264 buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
1266 rx_p = &params.specific_params.rx_params;
1267 rx_p->err_fqid = errq->fqid;
1268 rx_p->dflt_fqid = defq->fqid;
1269 if (pcdq) {
1270 rx_p->pcd_base_fqid = pcdq->fqid;
1271 rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
1274 rx_p->ext_buf_pools.num_of_pools_used = 1;
1275 rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid;
1276 rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
1278 err = fman_port_config(port, &params);
1279 if (err) {
1280 pr_err("%s: fman_port_config failed\n", __func__);
1281 return err;
1284 err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
1285 if (err) {
1286 pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
1287 __func__);
1288 return err;
1291 err = fman_port_init(port);
1292 if (err)
1293 pr_err("%s: fm_port_init failed\n", __func__);
1295 return err;
1298 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
1299 struct dpaa_bp *bp,
1300 struct fm_port_fqs *port_fqs,
1301 struct dpaa_buffer_layout *buf_layout,
1302 struct device *dev)
1304 struct fman_port *rxport = mac_dev->port[RX];
1305 struct fman_port *txport = mac_dev->port[TX];
1306 int err;
1308 err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
1309 port_fqs->tx_defq, &buf_layout[TX]);
1310 if (err)
1311 return err;
1313 err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
1314 port_fqs->rx_defq, port_fqs->rx_pcdq,
1315 &buf_layout[RX]);
1317 return err;
1320 static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
1321 struct bm_buffer *bmb, int cnt)
1323 int err;
1325 err = bman_release(dpaa_bp->pool, bmb, cnt);
1326 /* Should never occur, address anyway to avoid leaking the buffers */
1327 if (WARN_ON(err) && dpaa_bp->free_buf_cb)
1328 while (cnt-- > 0)
1329 dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);
1331 return cnt;
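/* Walk an S/G table and hand every buffer it references back to its BMan
 * pool, releasing them in batches of up to DPAA_BUFF_RELEASE_MAX.
 */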
1334 static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
1336 struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
1337 struct dpaa_bp *dpaa_bp;
1338 int i = 0, j;
1340 memset(bmb, 0, sizeof(bmb));
1342 do {
1343 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1344 if (!dpaa_bp)
1345 return;
1347 j = 0;
1348 do {
1349 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1351 bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));
1353 j++; i++;
1354 } while (j < ARRAY_SIZE(bmb) &&
1355 !qm_sg_entry_is_final(&sgt[i - 1]) &&
1356 sgt[i - 1].bpid == sgt[i].bpid);
1358 dpaa_bman_release(dpaa_bp, bmb, j);
1359 } while (!qm_sg_entry_is_final(&sgt[i - 1]));
1362 static void dpaa_fd_release(const struct net_device *net_dev,
1363 const struct qm_fd *fd)
1365 struct qm_sg_entry *sgt;
1366 struct dpaa_bp *dpaa_bp;
1367 struct bm_buffer bmb;
1368 dma_addr_t addr;
1369 void *vaddr;
1371 bmb.data = 0;
1372 bm_buffer_set64(&bmb, qm_fd_addr(fd));
1374 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1375 if (!dpaa_bp)
1376 return;
1378 if (qm_fd_get_format(fd) == qm_fd_sg) {
1379 vaddr = phys_to_virt(qm_fd_addr(fd));
1380 sgt = vaddr + qm_fd_get_offset(fd);
1382 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
1383 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1385 dpaa_release_sgt_members(sgt);
1387 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
1388 virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
1389 DMA_FROM_DEVICE);
1390 if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
1391 netdev_err(net_dev, "DMA mapping failed\n");
1392 return;
1394 bm_buffer_set64(&bmb, addr);
1397 dpaa_bman_release(dpaa_bp, &bmb, 1);
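/* Classify an enqueue rejection notification (ERN) by its rejection code and
 * bump the matching per-CPU counter.
 */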
1400 static void count_ern(struct dpaa_percpu_priv *percpu_priv,
1401 const union qm_mr_entry *msg)
1403 switch (msg->ern.rc & QM_MR_RC_MASK) {
1404 case QM_MR_RC_CGR_TAILDROP:
1405 percpu_priv->ern_cnt.cg_tdrop++;
1406 break;
1407 case QM_MR_RC_WRED:
1408 percpu_priv->ern_cnt.wred++;
1409 break;
1410 case QM_MR_RC_ERROR:
1411 percpu_priv->ern_cnt.err_cond++;
1412 break;
1413 case QM_MR_RC_ORPWINDOW_EARLY:
1414 percpu_priv->ern_cnt.early_window++;
1415 break;
1416 case QM_MR_RC_ORPWINDOW_LATE:
1417 percpu_priv->ern_cnt.late_window++;
1418 break;
1419 case QM_MR_RC_FQ_TAILDROP:
1420 percpu_priv->ern_cnt.fq_tdrop++;
1421 break;
1422 case QM_MR_RC_ORPWINDOW_RETIRED:
1423 percpu_priv->ern_cnt.fq_retired++;
1424 break;
1425 case QM_MR_RC_ORP_ZERO:
1426 percpu_priv->ern_cnt.orp_zero++;
1427 break;
1431 /* Turn on HW checksum computation for this outgoing frame.
1432 * If the current protocol is not something we support in this regard
1433 * (or if the stack has already computed the SW checksum), we do nothing.
1435 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
1436 * otherwise.
1438 * Note that this function may modify the fd->cmd field and the skb data buffer
1439 * (the Parse Results area).
1441 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
1442 struct sk_buff *skb,
1443 struct qm_fd *fd,
1444 void *parse_results)
1446 struct fman_prs_result *parse_result;
1447 u16 ethertype = ntohs(skb->protocol);
1448 struct ipv6hdr *ipv6h = NULL;
1449 struct iphdr *iph;
1450 int retval = 0;
1451 u8 l4_proto;
1453 if (skb->ip_summed != CHECKSUM_PARTIAL)
1454 return 0;
1456 /* Note: L3 csum seems to be already computed in sw, but we can't choose
1457 * L4 alone from the FM configuration anyway.
1460 /* Fill in some fields of the Parse Results array, so the FMan
1461 * can find them as if they came from the FMan Parser.
1463 parse_result = (struct fman_prs_result *)parse_results;
1465 /* If we're dealing with VLAN, get the real Ethernet type */
1466 if (ethertype == ETH_P_8021Q) {
1467 /* We can't always assume the MAC header is set correctly
1468 * by the stack, so reset to beginning of skb->data
1470 skb_reset_mac_header(skb);
1471 ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
1474 /* Fill in the relevant L3 parse result fields
1475 * and read the L4 protocol type
1477 switch (ethertype) {
1478 case ETH_P_IP:
1479 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
1480 iph = ip_hdr(skb);
1481 WARN_ON(!iph);
1482 l4_proto = iph->protocol;
1483 break;
1484 case ETH_P_IPV6:
1485 parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
1486 ipv6h = ipv6_hdr(skb);
1487 WARN_ON(!ipv6h);
1488 l4_proto = ipv6h->nexthdr;
1489 break;
1490 default:
1491 /* We shouldn't even be here */
1492 if (net_ratelimit())
1493 netif_alert(priv, tx_err, priv->net_dev,
1494 "Can't compute HW csum for L3 proto 0x%x\n",
1495 ntohs(skb->protocol));
1496 retval = -EIO;
1497 goto return_error;
1500 /* Fill in the relevant L4 parse result fields */
1501 switch (l4_proto) {
1502 case IPPROTO_UDP:
1503 parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
1504 break;
1505 case IPPROTO_TCP:
1506 parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
1507 break;
1508 default:
1509 if (net_ratelimit())
1510 netif_alert(priv, tx_err, priv->net_dev,
1511 "Can't compute HW csum for L4 proto 0x%x\n",
1512 l4_proto);
1513 retval = -EIO;
1514 goto return_error;
1517 /* At index 0 is IPOffset_1 as defined in the Parse Results */
1518 parse_result->ip_off[0] = (u8)skb_network_offset(skb);
1519 parse_result->l4_off = (u8)skb_transport_offset(skb);
1521 /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
1522 fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);
1524 /* On P1023 and similar platforms fd->cmd interpretation could
1525 * be disabled by setting CONTEXT_A bit ICMD; currently this bit
1526 * is not set so we do not need to check; in the future, if/when
1527 * using context_a we need to check this bit
1530 return_error:
1531 return retval;
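/* Allocate, DMA-map and release to BMan a batch of 8 page-backed Rx buffers.
 * Returns the number of buffers actually added to the pool.
 */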
1534 static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
1536 struct net_device *net_dev = dpaa_bp->priv->net_dev;
1537 struct bm_buffer bmb[8];
1538 dma_addr_t addr;
1539 struct page *p;
1540 u8 i;
1542 for (i = 0; i < 8; i++) {
1543 p = dev_alloc_pages(0);
1544 if (unlikely(!p)) {
1545 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1546 goto release_previous_buffs;
1549 addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
1550 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1551 if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
1552 addr))) {
1553 netdev_err(net_dev, "DMA map failed\n");
1554 goto release_previous_buffs;
1557 bmb[i].data = 0;
1558 bm_buffer_set64(&bmb[i], addr);
1561 release_bufs:
1562 return dpaa_bman_release(dpaa_bp, bmb, i);
1564 release_previous_buffs:
1565 WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");
1567 bm_buffer_set64(&bmb[i], 0);
1568 /* Avoid releasing a completely null buffer; bman_release() requires
1569 * at least one buffer.
1571 if (likely(i))
1572 goto release_bufs;
1574 return 0;
1577 static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
1579 int i;
1581 /* Give each CPU an allotment of "config_count" buffers */
1582 for_each_possible_cpu(i) {
1583 int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
1584 int j;
1586 /* Although we access another CPU's counters here
1587 * we do it at boot time so it is safe
1589 for (j = 0; j < dpaa_bp->config_count; j += 8)
1590 *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
1592 return 0;
1595 /* Add buffers (pages) for Rx processing whenever the bpool count falls below
1596 * REFILL_THRESHOLD.
1598 static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
1600 int count = *countptr;
1601 int new_bufs;
1603 if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
1604 do {
1605 new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
1606 if (unlikely(!new_bufs)) {
1607 /* Avoid looping forever if we've temporarily
1608 * run out of memory. We'll try again at the
1609 * next NAPI cycle.
1611 break;
1613 count += new_bufs;
1614 } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);
1616 *countptr = count;
1617 if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
1618 return -ENOMEM;
1621 return 0;
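/* Refill the current CPU's portion of the single Rx buffer pool, if its
 * count has dropped below the refill threshold.
 */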
1624 static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
1626 struct dpaa_bp *dpaa_bp;
1627 int *countptr;
1629 dpaa_bp = priv->dpaa_bp;
1630 if (!dpaa_bp)
1631 return -EINVAL;
1632 countptr = this_cpu_ptr(dpaa_bp->percpu_count);
1634 return dpaa_eth_refill_bpool(dpaa_bp, countptr);
1637 /* Cleanup function for outgoing frame descriptors that were built on Tx path,
1638 * either contiguous frames or scatter/gather ones.
1639 * Skb freeing is not handled here.
1641 * This function may be called on error paths in the Tx function, so guard
1642 * against cases when not all FD-relevant fields were filled in. To avoid
1643 * reading an invalid transmission timestamp on the error paths, callers set
1644 * ts to false.
1646 * Return the skb backpointer, since for S/G frames the buffer containing it
1647 * gets freed here.
1649 * No skb backpointer is set when transmitting XDP frames. Cleanup the buffer
1650 * and return NULL in this case.
1652 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1653 const struct qm_fd *fd, bool ts)
1655 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1656 struct device *dev = priv->net_dev->dev.parent;
1657 struct skb_shared_hwtstamps shhwtstamps;
1658 dma_addr_t addr = qm_fd_addr(fd);
1659 void *vaddr = phys_to_virt(addr);
1660 const struct qm_sg_entry *sgt;
1661 struct dpaa_eth_swbp *swbp;
1662 struct sk_buff *skb;
1663 u64 ns;
1664 int i;
1666 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1667 dma_unmap_page(priv->tx_dma_dev, addr,
1668 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1669 dma_dir);
1671 /* The SGT buffer was allocated on the Tx path with dev_alloc_pages(),
1672 * so it's from lowmem.
1674 sgt = vaddr + qm_fd_get_offset(fd);
1676 /* sgt[0] is from lowmem, was dma_map_single()-ed */
1677 dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
1678 qm_sg_entry_get_len(&sgt[0]), dma_dir);
1680 /* remaining pages were mapped with skb_frag_dma_map() */
1681 for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
1682 !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
1683 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1685 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
1686 qm_sg_entry_get_len(&sgt[i]), dma_dir);
1688 } else {
1689 dma_unmap_single(priv->tx_dma_dev, addr,
1690 qm_fd_get_offset(fd) + qm_fd_get_length(fd),
1691 dma_dir);
1694 swbp = (struct dpaa_eth_swbp *)vaddr;
1695 skb = swbp->skb;
1697 /* No skb backpointer is set when running XDP. An xdp_frame
1698 * backpointer is saved instead.
1700 if (!skb) {
1701 xdp_return_frame(swbp->xdpf);
1702 return NULL;
1705 /* DMA unmapping is required before accessing the HW provided info */
1706 if (ts && priv->tx_tstamp &&
1707 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1708 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1710 if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
1711 &ns)) {
1712 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1713 skb_tstamp_tx(skb, &shhwtstamps);
1714 } else {
1715 dev_warn(dev, "fman_port_get_tstamp failed!\n");
1719 if (qm_fd_get_format(fd) == qm_fd_sg)
1720 /* Free the page that we allocated on Tx for the SGT */
1721 free_pages((unsigned long)vaddr, 0);
1723 return skb;
1726 static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
1728 /* The parser has run and performed L4 checksum validation.
1729 * We know there were no parser errors (and implicitly no
1730 * L4 csum error), otherwise we wouldn't be here.
1732 if ((priv->net_dev->features & NETIF_F_RXCSUM) &&
1733 (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV))
1734 return CHECKSUM_UNNECESSARY;
1736 /* We're here because either the parser didn't run or the L4 checksum
1737 * was not verified. This may include the case of a UDP frame with
1738 * checksum zero or an L4 proto other than TCP/UDP
1740 return CHECKSUM_NONE;
1743 #define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
1745 /* Build a linear skb around the received buffer.
1746 * We are guaranteed there is enough room at the end of the data buffer to
1747 * accommodate the shared info area of the skb.
1749 static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
1750 const struct qm_fd *fd)
1752 ssize_t fd_off = qm_fd_get_offset(fd);
1753 dma_addr_t addr = qm_fd_addr(fd);
1754 struct dpaa_bp *dpaa_bp;
1755 struct sk_buff *skb;
1756 void *vaddr;
1758 vaddr = phys_to_virt(addr);
1759 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1761 dpaa_bp = dpaa_bpid2pool(fd->bpid);
1762 if (!dpaa_bp)
1763 goto free_buffer;
1765 skb = build_skb(vaddr, dpaa_bp->size +
1766 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
1767 if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
1768 goto free_buffer;
1769 skb_reserve(skb, fd_off);
1770 skb_put(skb, qm_fd_get_length(fd));
1772 skb->ip_summed = rx_csum_offload(priv, fd);
1774 return skb;
1776 free_buffer:
1777 free_pages((unsigned long)vaddr, 0);
1778 return NULL;
1781 /* Build an skb with the data of the first S/G entry in the linear portion and
1782 * the rest of the frame as skb fragments.
1784 * The page fragment holding the S/G Table is recycled here.
1786 static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
1787 const struct qm_fd *fd)
1789 ssize_t fd_off = qm_fd_get_offset(fd);
1790 dma_addr_t addr = qm_fd_addr(fd);
1791 const struct qm_sg_entry *sgt;
1792 struct page *page, *head_page;
1793 struct dpaa_bp *dpaa_bp;
1794 void *vaddr, *sg_vaddr;
1795 int frag_off, frag_len;
1796 struct sk_buff *skb;
1797 dma_addr_t sg_addr;
1798 int page_offset;
1799 unsigned int sz;
1800 int *count_ptr;
1801 int i, j;
1803 vaddr = phys_to_virt(addr);
1804 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));
1806 /* Iterate through the SGT entries and add data buffers to the skb */
1807 sgt = vaddr + fd_off;
1808 skb = NULL;
1809 for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
1810 /* Extension bit is not supported */
1811 WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
1813 sg_addr = qm_sg_addr(&sgt[i]);
1814 sg_vaddr = phys_to_virt(sg_addr);
1815 WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
1817 dma_unmap_page(priv->rx_dma_dev, sg_addr,
1818 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1820 /* We may use multiple Rx pools */
1821 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
1822 if (!dpaa_bp)
1823 goto free_buffers;
1825 if (!skb) {
1826 sz = dpaa_bp->size +
1827 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1828 skb = build_skb(sg_vaddr, sz);
1829 if (WARN_ON(!skb))
1830 goto free_buffers;
1832 skb->ip_summed = rx_csum_offload(priv, fd);
1834 /* Make sure forwarded skbs will have enough space
1835 * on Tx, if extra headers are added.
1837 WARN_ON(fd_off != priv->rx_headroom);
1838 skb_reserve(skb, fd_off);
1839 skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
1840 } else {
1841 /* Not the first S/G entry; all data from buffer will
1842 * be added in an skb fragment; fragment index is offset
1843 * by one since first S/G entry was incorporated in the
1844 * linear part of the skb.
1846 * Caution: 'page' may be a tail page.
1848 page = virt_to_page(sg_vaddr);
1849 head_page = virt_to_head_page(sg_vaddr);
1851 /* Compute offset in (possibly tail) page */
1852 page_offset = ((unsigned long)sg_vaddr &
1853 (PAGE_SIZE - 1)) +
1854 (page_address(page) - page_address(head_page));
1855 /* page_offset only refers to the beginning of sgt[i];
1856 * but the buffer itself may have an internal offset.
1858 frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
1859 frag_len = qm_sg_entry_get_len(&sgt[i]);
1860 /* skb_add_rx_frag() does no checking on the page; if
1861 * we pass it a tail page, we'll end up with
1862 * bad page accounting and eventually with segfaults.
1864 skb_add_rx_frag(skb, i - 1, head_page, frag_off,
1865 frag_len, dpaa_bp->size);
1868 /* Update the pool count for the current {cpu x bpool} */
1869 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1870 (*count_ptr)--;
1872 if (qm_sg_entry_is_final(&sgt[i]))
1873 break;
1875 WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
1877 /* free the SG table buffer */
1878 free_pages((unsigned long)vaddr, 0);
1880 return skb;
1882 free_buffers:
1883 /* free all the SG entries */
1884 for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) {
1885 sg_addr = qm_sg_addr(&sgt[j]);
1886 sg_vaddr = phys_to_virt(sg_addr);
1887 /* all pages 0..i were unmapped */
1888 if (j > i)
1889 dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]),
1890 DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
1891 free_pages((unsigned long)sg_vaddr, 0);
1892 /* counters 0..i-1 were decremented */
1893 if (j >= i) {
1894 dpaa_bp = dpaa_bpid2pool(sgt[j].bpid);
1895 if (dpaa_bp) {
1896 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
1897 (*count_ptr)--;
1901 if (qm_sg_entry_is_final(&sgt[j]))
1902 break;
1904 /* free the SGT fragment */
1905 free_pages((unsigned long)vaddr, 0);
1907 return NULL;
1910 static int skb_to_contig_fd(struct dpaa_priv *priv,
1911 struct sk_buff *skb, struct qm_fd *fd,
1912 int *offset)
1914 struct net_device *net_dev = priv->net_dev;
1915 enum dma_data_direction dma_dir;
1916 struct dpaa_eth_swbp *swbp;
1917 unsigned char *buff_start;
1918 dma_addr_t addr;
1919 int err;
1921 /* We are guaranteed to have at least tx_headroom bytes
1922 * available, so just use that for offset.
1924 fd->bpid = FSL_DPAA_BPID_INV;
1925 buff_start = skb->data - priv->tx_headroom;
1926 dma_dir = DMA_TO_DEVICE;
1928 swbp = (struct dpaa_eth_swbp *)buff_start;
1929 swbp->skb = skb;
1931 /* Enable L3/L4 hardware checksum computation.
1933 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1934 * need to write into the skb.
1936 err = dpaa_enable_tx_csum(priv, skb, fd,
1937 buff_start + DPAA_TX_PRIV_DATA_SIZE);
1938 if (unlikely(err < 0)) {
1939 if (net_ratelimit())
1940 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1941 err);
1942 return err;
1945 /* Fill in the rest of the FD fields */
1946 qm_fd_set_contig(fd, priv->tx_headroom, skb->len);
1947 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
1949 /* Map the entire buffer size that may be seen by FMan, but no more */
1950 addr = dma_map_single(priv->tx_dma_dev, buff_start,
1951 priv->tx_headroom + skb->len, dma_dir);
1952 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
1953 if (net_ratelimit())
1954 netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
1955 return -EINVAL;
1957 qm_fd_addr_set64(fd, addr);
1959 return 0;
1962 static int skb_to_sg_fd(struct dpaa_priv *priv,
1963 struct sk_buff *skb, struct qm_fd *fd)
1965 const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
1966 const int nr_frags = skb_shinfo(skb)->nr_frags;
1967 struct net_device *net_dev = priv->net_dev;
1968 struct dpaa_eth_swbp *swbp;
1969 struct qm_sg_entry *sgt;
1970 void *buff_start;
1971 skb_frag_t *frag;
1972 dma_addr_t addr;
1973 size_t frag_len;
1974 struct page *p;
1975 int i, j, err;
1977 /* get a page to store the SGTable */
1978 p = dev_alloc_pages(0);
1979 if (unlikely(!p)) {
1980 netdev_err(net_dev, "dev_alloc_pages() failed\n");
1981 return -ENOMEM;
1983 buff_start = page_address(p);
1985 /* Enable L3/L4 hardware checksum computation.
1987 * We must do this before dma_map_single(DMA_TO_DEVICE), because we may
1988 * need to write into the skb.
1990 err = dpaa_enable_tx_csum(priv, skb, fd,
1991 buff_start + DPAA_TX_PRIV_DATA_SIZE);
1992 if (unlikely(err < 0)) {
1993 if (net_ratelimit())
1994 netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
1995 err);
1996 goto csum_failed;
1999 /* SGT[0] is used by the linear part */
2000 sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
2001 frag_len = skb_headlen(skb);
2002 qm_sg_entry_set_len(&sgt[0], frag_len);
2003 sgt[0].bpid = FSL_DPAA_BPID_INV;
2004 sgt[0].offset = 0;
2005 addr = dma_map_single(priv->tx_dma_dev, skb->data,
2006 skb_headlen(skb), dma_dir);
2007 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2008 netdev_err(priv->net_dev, "DMA mapping failed\n");
2009 err = -EINVAL;
2010 goto sg0_map_failed;
2012 qm_sg_entry_set64(&sgt[0], addr);
2014 /* populate the rest of SGT entries */
2015 for (i = 0; i < nr_frags; i++) {
2016 frag = &skb_shinfo(skb)->frags[i];
2017 frag_len = skb_frag_size(frag);
2018 WARN_ON(!skb_frag_page(frag));
2019 addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0,
2020 frag_len, dma_dir);
2021 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2022 netdev_err(priv->net_dev, "DMA mapping failed\n");
2023 err = -EINVAL;
2024 goto sg_map_failed;
2027 qm_sg_entry_set_len(&sgt[i + 1], frag_len);
2028 sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
2029 sgt[i + 1].offset = 0;
2031 /* keep the offset in the address */
2032 qm_sg_entry_set64(&sgt[i + 1], addr);
2035 /* Set the final bit in the last used entry of the SGT */
2036 qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
2038 /* set fd offset to priv->tx_headroom */
2039 qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
2041 /* DMA map the SGT page */
2042 swbp = (struct dpaa_eth_swbp *)buff_start;
2043 swbp->skb = skb;
2045 addr = dma_map_page(priv->tx_dma_dev, p, 0,
2046 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
2047 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2048 netdev_err(priv->net_dev, "DMA mapping failed\n");
2049 err = -EINVAL;
2050 goto sgt_map_failed;
2053 fd->bpid = FSL_DPAA_BPID_INV;
2054 fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2055 qm_fd_addr_set64(fd, addr);
2057 return 0;
2059 sgt_map_failed:
2060 sg_map_failed:
2061 for (j = 0; j < i; j++)
2062 dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]),
2063 qm_sg_entry_get_len(&sgt[j]), dma_dir);
2064 sg0_map_failed:
2065 csum_failed:
2066 free_pages((unsigned long)buff_start, 0);
2068 return err;
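/* dpaa_xmit() - enqueue one FD on the egress FQ selected by 'queue'.
 *
 * For FDs that do not come from a buffer pool (bpid == FSL_DPAA_BPID_INV)
 * the Tx confirmation FQID is OR'ed into fd->cmd so the frame is confirmed
 * back to the driver. qman_enqueue() is retried up to DPAA_ENQUEUE_RETRIES
 * times while it returns -EBUSY; any other failure is counted as a
 * tx_fifo_error.
 */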
2071 static inline int dpaa_xmit(struct dpaa_priv *priv,
2072 struct rtnl_link_stats64 *percpu_stats,
2073 int queue,
2074 struct qm_fd *fd)
2076 struct qman_fq *egress_fq;
2077 int err, i;
2079 egress_fq = priv->egress_fqs[queue];
2080 if (fd->bpid == FSL_DPAA_BPID_INV)
2081 fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue]));
2083 /* Trace this Tx fd */
2084 trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd);
2086 for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) {
2087 err = qman_enqueue(egress_fq, fd);
2088 if (err != -EBUSY)
2089 break;
2092 if (unlikely(err < 0)) {
2093 percpu_stats->tx_fifo_errors++;
2094 return err;
2097 percpu_stats->tx_packets++;
2098 percpu_stats->tx_bytes += qm_fd_get_length(fd);
2100 return 0;
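/* A050385 erratum workarounds: frame data must start at a DPAA_A050385_ALIGN
 * boundary and, for S/G frames, the linear part and all but the last
 * fragment must also have aligned sizes. skbs and XDP frames that do not
 * meet these constraints are copied into a new, properly aligned buffer
 * before transmission.
 */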
2103 #ifdef CONFIG_DPAA_ERRATUM_A050385
2104 static int dpaa_a050385_wa_skb(struct net_device *net_dev, struct sk_buff **s)
2106 struct dpaa_priv *priv = netdev_priv(net_dev);
2107 struct sk_buff *new_skb, *skb = *s;
2108 unsigned char *start, i;
2110 /* check linear buffer alignment */
2111 if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
2112 goto workaround;
2114 /* linear buffers just need to have an aligned start */
2115 if (!skb_is_nonlinear(skb))
2116 return 0;
2118 /* linear data size for nonlinear skbs needs to be aligned */
2119 if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
2120 goto workaround;
2122 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2123 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2125 /* all fragments need to have aligned start addresses */
2126 if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
2127 goto workaround;
2129 /* all but last fragment need to have aligned sizes */
2130 if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
2131 (i < skb_shinfo(skb)->nr_frags - 1))
2132 goto workaround;
2135 return 0;
2137 workaround:
2138 /* copy all the skb content into a new linear buffer */
2139 new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
2140 priv->tx_headroom);
2141 if (!new_skb)
2142 return -ENOMEM;
2144 /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
2145 skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
2147 /* Workaround for DPAA_A050385 requires data start to be aligned */
2148 start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
2149 if (start - new_skb->data)
2150 skb_reserve(new_skb, start - new_skb->data);
2152 skb_put(new_skb, skb->len);
2153 skb_copy_bits(skb, 0, new_skb->data, skb->len);
2154 skb_copy_header(new_skb, skb);
2155 new_skb->dev = skb->dev;
2157 /* Copy relevant timestamp info from the old skb to the new */
2158 if (priv->tx_tstamp) {
2159 skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags;
2160 skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps;
2161 skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey;
2162 if (skb->sk)
2163 skb_set_owner_w(new_skb, skb->sk);
2166 /* We move the headroom when we align it so we have to reset the
2167 * network and transport header offsets relative to the new data
2168 * pointer. The checksum offload relies on these offsets.
2170 skb_set_network_header(new_skb, skb_network_offset(skb));
2171 skb_set_transport_header(new_skb, skb_transport_offset(skb));
2173 dev_kfree_skb(skb);
2174 *s = new_skb;
2176 return 0;
2179 static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
2180 struct xdp_frame **init_xdpf)
2182 struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
2183 void *new_buff;
2184 struct page *p;
2186 /* Check the data alignment and make sure the headroom is large
2187 * enough to store the xdpf backpointer. Use an aligned headroom
2188 * value.
2190 * Due to alignment constraints, we give XDP access to the full 256
2191 * byte frame headroom. If the XDP program uses all of it, copy the
2192 * data to a new buffer and make room for storing the backpointer.
2194 if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
2195 xdpf->headroom >= priv->tx_headroom) {
2196 xdpf->headroom = priv->tx_headroom;
2197 return 0;
2200 p = dev_alloc_pages(0);
2201 if (unlikely(!p))
2202 return -ENOMEM;
2204 /* Copy the data to the new buffer at a properly aligned offset */
2205 new_buff = page_address(p);
2206 memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
2208 /* Create an XDP frame around the new buffer in a similar fashion
2209 * to xdp_convert_buff_to_frame.
2211 new_xdpf = new_buff;
2212 new_xdpf->data = new_buff + priv->tx_headroom;
2213 new_xdpf->len = xdpf->len;
2214 new_xdpf->headroom = priv->tx_headroom;
2215 new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
2216 new_xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
2218 /* Release the initial buffer */
2219 xdp_return_frame_rx_napi(xdpf);
2221 *init_xdpf = new_xdpf;
2222 return 0;
2224 #endif
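/* dpaa_start_xmit() - main transmit path. In short:
 *  - linear skbs get skb_cow_head() so the backpointer can be written into
 *    a privately owned headroom;
 *  - skbs with more fragments than DPAA_SGT_MAX_ENTRIES are linearized;
 *  - the A050385 workaround is applied when the erratum is present;
 *  - a contiguous or S/G FD is built and handed to dpaa_xmit().
 * On failure the skb is freed and tx_errors is incremented; NETDEV_TX_OK is
 * returned either way.
 */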
2226 static netdev_tx_t
2227 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
2229 const int queue_mapping = skb_get_queue_mapping(skb);
2230 bool nonlinear = skb_is_nonlinear(skb);
2231 struct rtnl_link_stats64 *percpu_stats;
2232 struct dpaa_percpu_priv *percpu_priv;
2233 struct netdev_queue *txq;
2234 struct dpaa_priv *priv;
2235 struct qm_fd fd;
2236 int offset = 0;
2237 int err = 0;
2239 priv = netdev_priv(net_dev);
2240 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2241 percpu_stats = &percpu_priv->stats;
2243 qm_fd_clear_fd(&fd);
2245 if (!nonlinear) {
2246 /* We're going to store the skb backpointer at the beginning
2247 * of the data buffer, so we need a privately owned skb
2249 * We've made sure the skb is not shared in dev->priv_flags;
2250 * we still need to verify that the skb head is not cloned.
2252 if (skb_cow_head(skb, priv->tx_headroom))
2253 goto enomem;
2255 WARN_ON(skb_is_nonlinear(skb));
2258 /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
2259 * make sure we don't feed FMan more fragments than it supports.
2261 if (unlikely(nonlinear &&
2262 (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
2263 /* If the egress skb contains more fragments than we support
2264 * we have no choice but to linearize it ourselves.
2266 if (__skb_linearize(skb))
2267 goto enomem;
2269 nonlinear = skb_is_nonlinear(skb);
2272 #ifdef CONFIG_DPAA_ERRATUM_A050385
2273 if (unlikely(fman_has_errata_a050385())) {
2274 if (dpaa_a050385_wa_skb(net_dev, &skb))
2275 goto enomem;
2276 nonlinear = skb_is_nonlinear(skb);
2278 #endif
2280 if (nonlinear) {
2281 /* Just create a S/G fd based on the skb */
2282 err = skb_to_sg_fd(priv, skb, &fd);
2283 percpu_priv->tx_frag_skbuffs++;
2284 } else {
2285 /* Create a contig FD from this skb */
2286 err = skb_to_contig_fd(priv, skb, &fd, &offset);
2288 if (unlikely(err < 0))
2289 goto skb_to_fd_failed;
2291 txq = netdev_get_tx_queue(net_dev, queue_mapping);
2293 /* LLTX requires us to do our own update of trans_start */
2294 txq->trans_start = jiffies;
2296 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
2297 fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
2298 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2301 if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
2302 return NETDEV_TX_OK;
2304 dpaa_cleanup_tx_fd(priv, &fd, false);
2305 skb_to_fd_failed:
2306 enomem:
2307 percpu_stats->tx_errors++;
2308 dev_kfree_skb(skb);
2309 return NETDEV_TX_OK;
2312 static void dpaa_rx_error(struct net_device *net_dev,
2313 const struct dpaa_priv *priv,
2314 struct dpaa_percpu_priv *percpu_priv,
2315 const struct qm_fd *fd,
2316 u32 fqid)
2318 if (net_ratelimit())
2319 netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n",
2320 be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS);
2322 percpu_priv->stats.rx_errors++;
2324 if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA)
2325 percpu_priv->rx_errors.dme++;
2326 if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL)
2327 percpu_priv->rx_errors.fpe++;
2328 if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE)
2329 percpu_priv->rx_errors.fse++;
2330 if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR)
2331 percpu_priv->rx_errors.phe++;
2333 dpaa_fd_release(net_dev, fd);
2336 static void dpaa_tx_error(struct net_device *net_dev,
2337 const struct dpaa_priv *priv,
2338 struct dpaa_percpu_priv *percpu_priv,
2339 const struct qm_fd *fd,
2340 u32 fqid)
2342 struct sk_buff *skb;
2344 if (net_ratelimit())
2345 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2346 be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS);
2348 percpu_priv->stats.tx_errors++;
2350 skb = dpaa_cleanup_tx_fd(priv, fd, false);
2351 dev_kfree_skb(skb);
2354 static int dpaa_eth_poll(struct napi_struct *napi, int budget)
2356 struct dpaa_napi_portal *np =
2357 container_of(napi, struct dpaa_napi_portal, napi);
2358 int cleaned;
2360 np->xdp_act = 0;
2362 cleaned = qman_p_poll_dqrr(np->p, budget);
2364 if (cleaned < budget) {
2365 napi_complete_done(napi, cleaned);
2366 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2367 } else if (np->down) {
2368 qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
2371 if (np->xdp_act & XDP_REDIRECT)
2372 xdp_do_flush();
2374 return cleaned;
2377 static void dpaa_tx_conf(struct net_device *net_dev,
2378 const struct dpaa_priv *priv,
2379 struct dpaa_percpu_priv *percpu_priv,
2380 const struct qm_fd *fd,
2381 u32 fqid)
2383 struct sk_buff *skb;
2385 if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) {
2386 if (net_ratelimit())
2387 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2388 be32_to_cpu(fd->status) &
2389 FM_FD_STAT_TX_ERRORS);
2391 percpu_priv->stats.tx_errors++;
2394 percpu_priv->tx_confirm++;
2396 skb = dpaa_cleanup_tx_fd(priv, fd, true);
2398 consume_skb(skb);
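/* Called from the DQRR callbacks below. When servicing the portal from
 * interrupt context (sched_napi set), the DQRI interrupt source is masked
 * and NAPI is scheduled on the affine portal; the caller then returns
 * qman_cb_dqrr_stop so the remaining entries are drained from
 * dpaa_eth_poll() instead.
 */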
2401 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
2402 struct qman_portal *portal, bool sched_napi)
2404 if (sched_napi) {
2405 /* Disable QMan IRQ and invoke NAPI */
2406 qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
2408 percpu_priv->np.p = portal;
2409 napi_schedule(&percpu_priv->np.napi);
2410 percpu_priv->in_interrupt++;
2411 return 1;
2413 return 0;
2416 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
2417 struct qman_fq *fq,
2418 const struct qm_dqrr_entry *dq,
2419 bool sched_napi)
2421 struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2422 struct dpaa_percpu_priv *percpu_priv;
2423 struct net_device *net_dev;
2424 struct dpaa_bp *dpaa_bp;
2425 struct dpaa_priv *priv;
2427 net_dev = dpaa_fq->net_dev;
2428 priv = netdev_priv(net_dev);
2429 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2430 if (!dpaa_bp)
2431 return qman_cb_dqrr_consume;
2433 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2435 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2436 return qman_cb_dqrr_stop;
2438 dpaa_eth_refill_bpools(priv);
2439 dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2441 return qman_cb_dqrr_consume;
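/* dpaa_xdp_xmit_frame() - transmit a single xdp_frame.
 *
 * The frame's headroom must hold at least DPAA_TX_PRIV_DATA_SIZE bytes so
 * the dpaa_eth_swbp backpointer (skb == NULL, xdpf saved) fits in front of
 * the data. Headroom + data are DMA mapped in one go and the resulting
 * contiguous FD is enqueued on the current CPU's Tx queue.
 */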
2444 static int dpaa_xdp_xmit_frame(struct net_device *net_dev,
2445 struct xdp_frame *xdpf)
2447 struct dpaa_priv *priv = netdev_priv(net_dev);
2448 struct rtnl_link_stats64 *percpu_stats;
2449 struct dpaa_percpu_priv *percpu_priv;
2450 struct dpaa_eth_swbp *swbp;
2451 struct netdev_queue *txq;
2452 void *buff_start;
2453 struct qm_fd fd;
2454 dma_addr_t addr;
2455 int err;
2457 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2458 percpu_stats = &percpu_priv->stats;
2460 #ifdef CONFIG_DPAA_ERRATUM_A050385
2461 if (unlikely(fman_has_errata_a050385())) {
2462 if (dpaa_a050385_wa_xdpf(priv, &xdpf)) {
2463 err = -ENOMEM;
2464 goto out_error;
2467 #endif
2469 if (xdpf->headroom < DPAA_TX_PRIV_DATA_SIZE) {
2470 err = -EINVAL;
2471 goto out_error;
2474 buff_start = xdpf->data - xdpf->headroom;
2476 /* Leave the skb backpointer at the start of the buffer empty.
2477 * Save the XDP frame for easy cleanup on confirmation.
2479 swbp = (struct dpaa_eth_swbp *)buff_start;
2480 swbp->skb = NULL;
2481 swbp->xdpf = xdpf;
2483 qm_fd_clear_fd(&fd);
2484 fd.bpid = FSL_DPAA_BPID_INV;
2485 fd.cmd |= cpu_to_be32(FM_FD_CMD_FCO);
2486 qm_fd_set_contig(&fd, xdpf->headroom, xdpf->len);
2488 addr = dma_map_single(priv->tx_dma_dev, buff_start,
2489 xdpf->headroom + xdpf->len,
2490 DMA_TO_DEVICE);
2491 if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
2492 err = -EINVAL;
2493 goto out_error;
2496 qm_fd_addr_set64(&fd, addr);
2498 /* Bump the trans_start */
2499 txq = netdev_get_tx_queue(net_dev, smp_processor_id());
2500 txq->trans_start = jiffies;
2502 err = dpaa_xmit(priv, percpu_stats, smp_processor_id(), &fd);
2503 if (err) {
2504 dma_unmap_single(priv->tx_dma_dev, addr,
2505 qm_fd_get_offset(&fd) + qm_fd_get_length(&fd),
2506 DMA_TO_DEVICE);
2507 goto out_error;
2510 return 0;
2512 out_error:
2513 percpu_stats->tx_errors++;
2514 return err;
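/* dpaa_run_xdp() - run the attached XDP program on a contiguous Rx frame.
 *
 * The xdp_buff is built directly on top of the Rx buffer and the (possibly
 * moved) data offset and length are written back into the FD before the
 * verdict is handled: XDP_TX recycles the frame through
 * dpaa_xdp_xmit_frame(), XDP_REDIRECT hands it to xdp_do_redirect(), and
 * XDP_DROP/XDP_ABORTED free the page.
 */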
2517 static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
2518 struct dpaa_fq *dpaa_fq, unsigned int *xdp_meta_len)
2520 ssize_t fd_off = qm_fd_get_offset(fd);
2521 struct bpf_prog *xdp_prog;
2522 struct xdp_frame *xdpf;
2523 struct xdp_buff xdp;
2524 u32 xdp_act;
2525 int err;
2527 rcu_read_lock();
2529 xdp_prog = READ_ONCE(priv->xdp_prog);
2530 if (!xdp_prog) {
2531 rcu_read_unlock();
2532 return XDP_PASS;
2535 xdp.data = vaddr + fd_off;
2536 xdp.data_meta = xdp.data;
2537 xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
2538 xdp.data_end = xdp.data + qm_fd_get_length(fd);
2539 xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
2540 xdp.rxq = &dpaa_fq->xdp_rxq;
2542 /* We reserve a fixed headroom of 256 bytes under the erratum and we
2543 * offer it all to XDP programs to use. If no room is left for the
2544 * xdpf backpointer on TX, we will need to copy the data.
2545 * Disable metadata support since data realignments might be required
2546 * and the information can be lost.
2548 #ifdef CONFIG_DPAA_ERRATUM_A050385
2549 if (unlikely(fman_has_errata_a050385())) {
2550 xdp_set_data_meta_invalid(&xdp);
2551 xdp.data_hard_start = vaddr;
2552 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2554 #endif
2556 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
2558 /* Update the length and the offset of the FD */
2559 qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
2561 switch (xdp_act) {
2562 case XDP_PASS:
2563 #ifdef CONFIG_DPAA_ERRATUM_A050385
2564 *xdp_meta_len = xdp_data_meta_unsupported(&xdp) ? 0 :
2565 xdp.data - xdp.data_meta;
2566 #else
2567 *xdp_meta_len = xdp.data - xdp.data_meta;
2568 #endif
2569 break;
2570 case XDP_TX:
2571 /* We can access the full headroom when sending the frame
2572 * back out
2574 xdp.data_hard_start = vaddr;
2575 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2576 xdpf = xdp_convert_buff_to_frame(&xdp);
2577 if (unlikely(!xdpf)) {
2578 free_pages((unsigned long)vaddr, 0);
2579 break;
2582 if (dpaa_xdp_xmit_frame(priv->net_dev, xdpf))
2583 xdp_return_frame_rx_napi(xdpf);
2585 break;
2586 case XDP_REDIRECT:
2587 /* Allow redirect to use the full headroom */
2588 xdp.data_hard_start = vaddr;
2589 xdp.frame_sz = DPAA_BP_RAW_SIZE;
2591 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
2592 if (err) {
2593 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2594 free_pages((unsigned long)vaddr, 0);
2596 break;
2597 default:
2598 bpf_warn_invalid_xdp_action(xdp_act);
2599 fallthrough;
2600 case XDP_ABORTED:
2601 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
2602 fallthrough;
2603 case XDP_DROP:
2604 /* Free the buffer */
2605 free_pages((unsigned long)vaddr, 0);
2606 break;
2609 rcu_read_unlock();
2611 return xdp_act;
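/* rx_default_dqrr() - default Rx path, called for each dequeued frame.
 *
 * Buffer pools are refilled first and errored FDs are released back; the
 * buffer is then unmapped, the timestamp and hash results are read from the
 * headroom before XDP can overwrite them, contiguous frames are run through
 * the XDP program and, unless consumed there, converted to skbs (as are S/G
 * frames) and passed to netif_receive_skb().
 */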
2614 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2615 struct qman_fq *fq,
2616 const struct qm_dqrr_entry *dq,
2617 bool sched_napi)
2619 bool ts_valid = false, hash_valid = false;
2620 struct skb_shared_hwtstamps *shhwtstamps;
2621 unsigned int skb_len, xdp_meta_len = 0;
2622 struct rtnl_link_stats64 *percpu_stats;
2623 struct dpaa_percpu_priv *percpu_priv;
2624 const struct qm_fd *fd = &dq->fd;
2625 dma_addr_t addr = qm_fd_addr(fd);
2626 struct dpaa_napi_portal *np;
2627 enum qm_fd_format fd_format;
2628 struct net_device *net_dev;
2629 u32 fd_status, hash_offset;
2630 struct qm_sg_entry *sgt;
2631 struct dpaa_bp *dpaa_bp;
2632 struct dpaa_fq *dpaa_fq;
2633 struct dpaa_priv *priv;
2634 struct sk_buff *skb;
2635 int *count_ptr;
2636 u32 xdp_act;
2637 void *vaddr;
2638 u32 hash;
2639 u64 ns;
2641 np = container_of(&portal, struct dpaa_napi_portal, p);
2642 dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
2643 fd_status = be32_to_cpu(fd->status);
2644 fd_format = qm_fd_get_format(fd);
2645 net_dev = dpaa_fq->net_dev;
2646 priv = netdev_priv(net_dev);
2647 dpaa_bp = dpaa_bpid2pool(dq->fd.bpid);
2648 if (!dpaa_bp)
2649 return qman_cb_dqrr_consume;
2651 /* Trace the Rx fd */
2652 trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
2654 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2655 percpu_stats = &percpu_priv->stats;
2657 if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
2658 return qman_cb_dqrr_stop;
2660 /* Make sure we didn't run out of buffers */
2661 if (unlikely(dpaa_eth_refill_bpools(priv))) {
2662 /* Unable to refill the buffer pool due to insufficient
2663 * system memory. Just release the frame back into the pool,
2664 * otherwise we'll soon end up with an empty buffer pool.
2666 dpaa_fd_release(net_dev, &dq->fd);
2667 return qman_cb_dqrr_consume;
2670 if (unlikely((fd_status & FM_FD_STAT_RX_ERRORS) != 0)) {
2671 if (net_ratelimit())
2672 netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n",
2673 fd_status & FM_FD_STAT_RX_ERRORS);
2675 percpu_stats->rx_errors++;
2676 dpaa_fd_release(net_dev, fd);
2677 return qman_cb_dqrr_consume;
2680 dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
2681 DMA_FROM_DEVICE);
2683 /* prefetch the first 64 bytes of the frame or the SGT start */
2684 vaddr = phys_to_virt(addr);
2685 prefetch(vaddr + qm_fd_get_offset(fd));
2687 /* The only FD types that we may receive are contig and S/G */
2688 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2690 /* Account for either the contig buffer or the SGT buffer (depending on
2691 * which case we were in) having been removed from the pool.
2693 count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
2694 (*count_ptr)--;
2696 /* Extract the timestamp stored in the headroom before running XDP */
2697 if (priv->rx_tstamp) {
2698 if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
2699 ts_valid = true;
2700 else
2701 WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
2704 /* Extract the hash stored in the headroom before running XDP */
2705 if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
2706 !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
2707 &hash_offset)) {
2708 hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
2709 hash_valid = true;
2712 if (likely(fd_format == qm_fd_contig)) {
2713 xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
2714 dpaa_fq, &xdp_meta_len);
2715 np->xdp_act |= xdp_act;
2716 if (xdp_act != XDP_PASS) {
2717 percpu_stats->rx_packets++;
2718 percpu_stats->rx_bytes += qm_fd_get_length(fd);
2719 return qman_cb_dqrr_consume;
2721 skb = contig_fd_to_skb(priv, fd);
2722 } else {
2723 /* XDP doesn't support S/G frames. Return the fragments to the
2724 * buffer pool and release the SGT.
2726 if (READ_ONCE(priv->xdp_prog)) {
2727 WARN_ONCE(1, "S/G frames not supported under XDP\n");
2728 sgt = vaddr + qm_fd_get_offset(fd);
2729 dpaa_release_sgt_members(sgt);
2730 free_pages((unsigned long)vaddr, 0);
2731 return qman_cb_dqrr_consume;
2733 skb = sg_fd_to_skb(priv, fd);
2735 if (!skb)
2736 return qman_cb_dqrr_consume;
2738 if (xdp_meta_len)
2739 skb_metadata_set(skb, xdp_meta_len);
2741 /* Set the previously extracted timestamp */
2742 if (ts_valid) {
2743 shhwtstamps = skb_hwtstamps(skb);
2744 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2745 shhwtstamps->hwtstamp = ns_to_ktime(ns);
2748 skb->protocol = eth_type_trans(skb, net_dev);
2750 /* Set the previously extracted hash */
2751 if (hash_valid) {
2752 enum pkt_hash_types type;
2754 /* if L4 exists, it was used in the hash generation */
2755 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
2756 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
2757 skb_set_hash(skb, hash, type);
2760 skb_len = skb->len;
2762 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2763 percpu_stats->rx_dropped++;
2764 return qman_cb_dqrr_consume;
2767 percpu_stats->rx_packets++;
2768 percpu_stats->rx_bytes += skb_len;
2770 return qman_cb_dqrr_consume;
2773 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
2774 struct qman_fq *fq,
2775 const struct qm_dqrr_entry *dq,
2776 bool sched_napi)
2778 struct dpaa_percpu_priv *percpu_priv;
2779 struct net_device *net_dev;
2780 struct dpaa_priv *priv;
2782 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2783 priv = netdev_priv(net_dev);
2785 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2787 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2788 return qman_cb_dqrr_stop;
2790 dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2792 return qman_cb_dqrr_consume;
2795 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
2796 struct qman_fq *fq,
2797 const struct qm_dqrr_entry *dq,
2798 bool sched_napi)
2800 struct dpaa_percpu_priv *percpu_priv;
2801 struct net_device *net_dev;
2802 struct dpaa_priv *priv;
2804 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2805 priv = netdev_priv(net_dev);
2807 /* Trace the fd */
2808 trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
2810 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2812 if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
2813 return qman_cb_dqrr_stop;
2815 dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
2817 return qman_cb_dqrr_consume;
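/* egress_ern() - enqueue rejection callback for the egress FQs. QMan refused
 * the enqueue, so the FD is unwound with dpaa_cleanup_tx_fd(), the skb is
 * freed and the frame accounted as dropped.
 */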
2820 static void egress_ern(struct qman_portal *portal,
2821 struct qman_fq *fq,
2822 const union qm_mr_entry *msg)
2824 const struct qm_fd *fd = &msg->ern.fd;
2825 struct dpaa_percpu_priv *percpu_priv;
2826 const struct dpaa_priv *priv;
2827 struct net_device *net_dev;
2828 struct sk_buff *skb;
2830 net_dev = ((struct dpaa_fq *)fq)->net_dev;
2831 priv = netdev_priv(net_dev);
2832 percpu_priv = this_cpu_ptr(priv->percpu_priv);
2834 percpu_priv->stats.tx_dropped++;
2835 percpu_priv->stats.tx_fifo_errors++;
2836 count_ern(percpu_priv, msg);
2838 skb = dpaa_cleanup_tx_fd(priv, fd, false);
2839 dev_kfree_skb_any(skb);
2842 static const struct dpaa_fq_cbs dpaa_fq_cbs = {
2843 .rx_defq = { .cb = { .dqrr = rx_default_dqrr } },
2844 .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } },
2845 .rx_errq = { .cb = { .dqrr = rx_error_dqrr } },
2846 .tx_errq = { .cb = { .dqrr = conf_error_dqrr } },
2847 .egress_ern = { .cb = { .ern = egress_ern } }
2850 static void dpaa_eth_napi_enable(struct dpaa_priv *priv)
2852 struct dpaa_percpu_priv *percpu_priv;
2853 int i;
2855 for_each_online_cpu(i) {
2856 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2858 percpu_priv->np.down = false;
2859 napi_enable(&percpu_priv->np.napi);
2863 static void dpaa_eth_napi_disable(struct dpaa_priv *priv)
2865 struct dpaa_percpu_priv *percpu_priv;
2866 int i;
2868 for_each_online_cpu(i) {
2869 percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
2871 percpu_priv->np.down = true;
2872 napi_disable(&percpu_priv->np.napi);
2876 static void dpaa_adjust_link(struct net_device *net_dev)
2878 struct mac_device *mac_dev;
2879 struct dpaa_priv *priv;
2881 priv = netdev_priv(net_dev);
2882 mac_dev = priv->mac_dev;
2883 mac_dev->adjust_link(mac_dev);
2886 /* The Aquantia PHYs are capable of performing rate adaptation */
2887 #define PHY_VEND_AQUANTIA 0x03a1b400
2889 static int dpaa_phy_init(struct net_device *net_dev)
2891 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
2892 struct mac_device *mac_dev;
2893 struct phy_device *phy_dev;
2894 struct dpaa_priv *priv;
2896 priv = netdev_priv(net_dev);
2897 mac_dev = priv->mac_dev;
2899 phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
2900 &dpaa_adjust_link, 0,
2901 mac_dev->phy_if);
2902 if (!phy_dev) {
2903 netif_err(priv, ifup, net_dev, "init_phy() failed\n");
2904 return -ENODEV;
2907 /* Unless the PHY is capable of rate adaptation */
2908 if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII ||
2909 ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) {
2910 /* remove any features not supported by the controller */
2911 ethtool_convert_legacy_u32_to_link_mode(mask,
2912 mac_dev->if_support);
2913 linkmode_and(phy_dev->supported, phy_dev->supported, mask);
2916 phy_support_asym_pause(phy_dev);
2918 mac_dev->phy_dev = phy_dev;
2919 net_dev->phydev = phy_dev;
2921 return 0;
2924 static int dpaa_open(struct net_device *net_dev)
2926 struct mac_device *mac_dev;
2927 struct dpaa_priv *priv;
2928 int err, i;
2930 priv = netdev_priv(net_dev);
2931 mac_dev = priv->mac_dev;
2932 dpaa_eth_napi_enable(priv);
2934 err = dpaa_phy_init(net_dev);
2935 if (err)
2936 goto phy_init_failed;
2938 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
2939 err = fman_port_enable(mac_dev->port[i]);
2940 if (err)
2941 goto mac_start_failed;
2944 err = priv->mac_dev->start(mac_dev);
2945 if (err < 0) {
2946 netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
2947 goto mac_start_failed;
2950 netif_tx_start_all_queues(net_dev);
2952 return 0;
2954 mac_start_failed:
2955 for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
2956 fman_port_disable(mac_dev->port[i]);
2958 phy_init_failed:
2959 dpaa_eth_napi_disable(priv);
2961 return err;
2964 static int dpaa_eth_stop(struct net_device *net_dev)
2966 struct dpaa_priv *priv;
2967 int err;
2969 err = dpaa_stop(net_dev);
2971 priv = netdev_priv(net_dev);
2972 dpaa_eth_napi_disable(priv);
2974 return err;
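/* With XDP enabled every frame must fit in a single buffer (no S/G), so the
 * MTU is bounded by the usable buffer size:
 *
 *   mtu + VLAN_ETH_HLEN + ETH_FCS_LEN <= dpaa_bp->size - rx_headroom
 */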
2977 static bool xdp_validate_mtu(struct dpaa_priv *priv, int mtu)
2979 int max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
2981 /* We do not support S/G fragments when XDP is enabled.
2982 * Limit the MTU in relation to the buffer size.
2984 if (mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data) {
2985 dev_warn(priv->net_dev->dev.parent,
2986 "The maximum MTU for XDP is %d\n",
2987 max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
2988 return false;
2991 return true;
2994 static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
2996 struct dpaa_priv *priv = netdev_priv(net_dev);
2998 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
2999 return -EINVAL;
3001 net_dev->mtu = new_mtu;
3002 return 0;
3005 static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
3007 struct dpaa_priv *priv = netdev_priv(net_dev);
3008 struct bpf_prog *old_prog;
3009 int err;
3010 bool up;
3012 /* S/G fragments are not supported in XDP-mode */
3013 if (bpf->prog && !xdp_validate_mtu(priv, net_dev->mtu)) {
3014 NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
3015 return -EINVAL;
3018 up = netif_running(net_dev);
3020 if (up)
3021 dpaa_eth_stop(net_dev);
3023 old_prog = xchg(&priv->xdp_prog, bpf->prog);
3024 if (old_prog)
3025 bpf_prog_put(old_prog);
3027 if (up) {
3028 err = dpaa_open(net_dev);
3029 if (err) {
3030 NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
3031 return err;
3035 return 0;
3038 static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
3040 switch (xdp->command) {
3041 case XDP_SETUP_PROG:
3042 return dpaa_setup_xdp(net_dev, xdp);
3043 default:
3044 return -EINVAL;
3048 static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
3049 struct xdp_frame **frames, u32 flags)
3051 struct xdp_frame *xdpf;
3052 int i, err, drops = 0;
3054 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3055 return -EINVAL;
3057 if (!netif_running(net_dev))
3058 return -ENETDOWN;
3060 for (i = 0; i < n; i++) {
3061 xdpf = frames[i];
3062 err = dpaa_xdp_xmit_frame(net_dev, xdpf);
3063 if (err) {
3064 xdp_return_frame_rx_napi(xdpf);
3065 drops++;
3069 return n - drops;
3072 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3074 struct dpaa_priv *priv = netdev_priv(dev);
3075 struct hwtstamp_config config;
3077 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
3078 return -EFAULT;
3080 switch (config.tx_type) {
3081 case HWTSTAMP_TX_OFF:
3082 /* Rx/Tx timestamping cannot be disabled separately;
3083 * do nothing here.
3085 priv->tx_tstamp = false;
3086 break;
3087 case HWTSTAMP_TX_ON:
3088 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3089 priv->tx_tstamp = true;
3090 break;
3091 default:
3092 return -ERANGE;
3095 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
3096 /* Rx/Tx timestamping cannot be disabled separately;
3097 * do nothing here.
3099 priv->rx_tstamp = false;
3100 } else {
3101 priv->mac_dev->set_tstamp(priv->mac_dev->fman_mac, true);
3102 priv->rx_tstamp = true;
3103 /* TS is set for all frame types, not only those requested */
3104 config.rx_filter = HWTSTAMP_FILTER_ALL;
3107 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
3108 -EFAULT : 0;
3111 static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd)
3113 int ret = -EINVAL;
3115 if (cmd == SIOCGMIIREG) {
3116 if (net_dev->phydev)
3117 return phy_mii_ioctl(net_dev->phydev, rq, cmd);
3120 if (cmd == SIOCSHWTSTAMP)
3121 return dpaa_ts_ioctl(net_dev, rq, cmd);
3123 return ret;
3126 static const struct net_device_ops dpaa_ops = {
3127 .ndo_open = dpaa_open,
3128 .ndo_start_xmit = dpaa_start_xmit,
3129 .ndo_stop = dpaa_eth_stop,
3130 .ndo_tx_timeout = dpaa_tx_timeout,
3131 .ndo_get_stats64 = dpaa_get_stats64,
3132 .ndo_change_carrier = fixed_phy_change_carrier,
3133 .ndo_set_mac_address = dpaa_set_mac_address,
3134 .ndo_validate_addr = eth_validate_addr,
3135 .ndo_set_rx_mode = dpaa_set_rx_mode,
3136 .ndo_do_ioctl = dpaa_ioctl,
3137 .ndo_setup_tc = dpaa_setup_tc,
3138 .ndo_change_mtu = dpaa_change_mtu,
3139 .ndo_bpf = dpaa_xdp,
3140 .ndo_xdp_xmit = dpaa_xdp_xmit,
3143 static int dpaa_napi_add(struct net_device *net_dev)
3145 struct dpaa_priv *priv = netdev_priv(net_dev);
3146 struct dpaa_percpu_priv *percpu_priv;
3147 int cpu;
3149 for_each_possible_cpu(cpu) {
3150 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3152 netif_napi_add(net_dev, &percpu_priv->np.napi,
3153 dpaa_eth_poll, NAPI_POLL_WEIGHT);
3156 return 0;
3159 static void dpaa_napi_del(struct net_device *net_dev)
3161 struct dpaa_priv *priv = netdev_priv(net_dev);
3162 struct dpaa_percpu_priv *percpu_priv;
3163 int cpu;
3165 for_each_possible_cpu(cpu) {
3166 percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
3168 netif_napi_del(&percpu_priv->np.napi);
3172 static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
3173 struct bm_buffer *bmb)
3175 dma_addr_t addr = bm_buf_addr(bmb);
3177 dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
3178 DMA_FROM_DEVICE);
3180 skb_free_frag(phys_to_virt(addr));
3183 /* Alloc the dpaa_bp struct and configure default values */
3184 static struct dpaa_bp *dpaa_bp_alloc(struct device *dev)
3186 struct dpaa_bp *dpaa_bp;
3188 dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL);
3189 if (!dpaa_bp)
3190 return ERR_PTR(-ENOMEM);
3192 dpaa_bp->bpid = FSL_DPAA_BPID_INV;
3193 dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count);
3194 if (!dpaa_bp->percpu_count)
3195 return ERR_PTR(-ENOMEM);
3197 dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT;
3199 dpaa_bp->seed_cb = dpaa_bp_seed;
3200 dpaa_bp->free_buf_cb = dpaa_bp_free_pf;
3202 return dpaa_bp;
3205 /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR.
3206 * We won't be sending congestion notifications to FMan; for now, we just use
3207 * this CGR to generate enqueue rejections to FMan in order to drop the frames
3208 * before they reach our ingress queues and eat up memory.
3210 static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
3212 struct qm_mcc_initcgr initcgr;
3213 u32 cs_th;
3214 int err;
3216 err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid);
3217 if (err < 0) {
3218 if (netif_msg_drv(priv))
3219 pr_err("Error %d allocating CGR ID\n", err);
3220 goto out_error;
3223 /* Enable CS TD, but disable Congestion State Change Notifications. */
3224 memset(&initcgr, 0, sizeof(initcgr));
3225 initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
3226 initcgr.cgr.cscn_en = QM_CGR_EN;
3227 cs_th = DPAA_INGRESS_CS_THRESHOLD;
3228 qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);
3230 initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
3231 initcgr.cgr.cstd_en = QM_CGR_EN;
3233 /* This CGR will be associated with the SWP affined to the current CPU.
3234 * However, we'll place all our ingress FQs in it.
3236 err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT,
3237 &initcgr);
3238 if (err < 0) {
3239 if (netif_msg_drv(priv))
3240 pr_err("Error %d creating ingress CGR with ID %d\n",
3241 err, priv->ingress_cgr.cgrid);
3242 qman_release_cgrid(priv->ingress_cgr.cgrid);
3243 goto out_error;
3245 if (netif_msg_drv(priv))
3246 pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n",
3247 priv->ingress_cgr.cgrid, priv->mac_dev->addr);
3249 priv->use_ingress_cgr = true;
3251 out_error:
3252 return err;
3255 static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl,
3256 enum port_type port)
3258 u16 headroom;
3260 /* The frame headroom must accommodate:
3261 * - the driver private data area
3262 * - parse results, hash results, timestamp if selected
3263 * If either hash results or the timestamp is selected, both will
3264 * be copied to/from the frame headroom, as the TS is located between PR
3265 * and HR in the IC and the IC copy size has a granularity of 16 bytes
3266 * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM)
3268 * Also make sure the headroom is a multiple of data_align bytes
3270 headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE);
3272 if (port == RX) {
3273 #ifdef CONFIG_DPAA_ERRATUM_A050385
3274 if (unlikely(fman_has_errata_a050385()))
3275 headroom = XDP_PACKET_HEADROOM;
3276 #endif
3278 return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT);
3279 } else {
3280 return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
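/* dpaa_eth_probe() - probe one DPAA Ethernet interface. Rough ordering, as
 * implemented below: defer until BMan/QMan and their portals have probed,
 * allocate the netdev and private state, resolve the MAC and the Rx/Tx DMA
 * devices, set up the buffer pool, frame queues and congestion groups,
 * initialize the FMan ports, per-CPU state and NAPI, then register the
 * netdev through dpaa_netdev_init().
 */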
3284 static int dpaa_eth_probe(struct platform_device *pdev)
3286 struct net_device *net_dev = NULL;
3287 struct dpaa_bp *dpaa_bp = NULL;
3288 struct dpaa_fq *dpaa_fq, *tmp;
3289 struct dpaa_priv *priv = NULL;
3290 struct fm_port_fqs port_fqs;
3291 struct mac_device *mac_dev;
3292 int err = 0, channel;
3293 struct device *dev;
3295 dev = &pdev->dev;
3297 err = bman_is_probed();
3298 if (!err)
3299 return -EPROBE_DEFER;
3300 if (err < 0) {
3301 dev_err(dev, "failing probe due to bman probe error\n");
3302 return -ENODEV;
3304 err = qman_is_probed();
3305 if (!err)
3306 return -EPROBE_DEFER;
3307 if (err < 0) {
3308 dev_err(dev, "failing probe due to qman probe error\n");
3309 return -ENODEV;
3311 err = bman_portals_probed();
3312 if (!err)
3313 return -EPROBE_DEFER;
3314 if (err < 0) {
3315 dev_err(dev,
3316 "failing probe due to bman portals probe error\n");
3317 return -ENODEV;
3319 err = qman_portals_probed();
3320 if (!err)
3321 return -EPROBE_DEFER;
3322 if (err < 0) {
3323 dev_err(dev,
3324 "failing probe due to qman portals probe error\n");
3325 return -ENODEV;
3328 /* Allocate this early, so we can store relevant information in
3329 * the private area
3331 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
3332 if (!net_dev) {
3333 dev_err(dev, "alloc_etherdev_mq() failed\n");
3334 return -ENOMEM;
3337 /* Do this here, so we can be verbose early */
3338 SET_NETDEV_DEV(net_dev, dev->parent);
3339 dev_set_drvdata(dev, net_dev);
3341 priv = netdev_priv(net_dev);
3342 priv->net_dev = net_dev;
3344 priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
3346 mac_dev = dpaa_mac_dev_get(pdev);
3347 if (IS_ERR(mac_dev)) {
3348 netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
3349 err = PTR_ERR(mac_dev);
3350 goto free_netdev;
3353 /* Devices used for DMA mapping */
3354 priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]);
3355 priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]);
3356 err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40));
3357 if (!err)
3358 err = dma_coerce_mask_and_coherent(priv->tx_dma_dev,
3359 DMA_BIT_MASK(40));
3360 if (err) {
3361 netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
3362 goto free_netdev;
3365 /* If fsl_fm_max_frm is set to a value higher than the common 1500,
3366 * we choose conservatively and let the user explicitly set a higher
3367 * MTU via ifconfig. Otherwise, the user may end up with different MTUs
3368 * in the same LAN.
3369 * If on the other hand fsl_fm_max_frm has been chosen below 1500,
3370 * start with the maximum allowed.
3372 net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN);
3374 netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n",
3375 net_dev->mtu);
3377 priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */
3378 priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
3380 /* bp init */
3381 dpaa_bp = dpaa_bp_alloc(dev);
3382 if (IS_ERR(dpaa_bp)) {
3383 err = PTR_ERR(dpaa_bp);
3384 goto free_dpaa_bps;
3386 /* the raw size of the buffers used for reception */
3387 dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
3388 /* avoid runtime computations by keeping the usable size here */
3389 dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
3390 dpaa_bp->priv = priv;
3392 err = dpaa_bp_alloc_pool(dpaa_bp);
3393 if (err < 0)
3394 goto free_dpaa_bps;
3395 priv->dpaa_bp = dpaa_bp;
3397 INIT_LIST_HEAD(&priv->dpaa_fq_list);
3399 memset(&port_fqs, 0, sizeof(port_fqs));
3401 err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs);
3402 if (err < 0) {
3403 dev_err(dev, "dpaa_alloc_all_fqs() failed\n");
3404 goto free_dpaa_bps;
3407 priv->mac_dev = mac_dev;
3409 channel = dpaa_get_channel();
3410 if (channel < 0) {
3411 dev_err(dev, "dpaa_get_channel() failed\n");
3412 err = channel;
3413 goto free_dpaa_bps;
3416 priv->channel = (u16)channel;
3418 /* Walk the CPUs with affine portals
3419 * and add this pool channel to each one's dequeue mask.
3421 dpaa_eth_add_channel(priv->channel, &pdev->dev);
3423 dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
3425 /* Create a congestion group for this netdev, with
3426 * dynamically-allocated CGR ID.
3427 * Must be executed after probing the MAC, but before
3428 * assigning the egress FQs to the CGRs.
3430 err = dpaa_eth_cgr_init(priv);
3431 if (err < 0) {
3432 dev_err(dev, "Error initializing CGR\n");
3433 goto free_dpaa_bps;
3436 err = dpaa_ingress_cgr_init(priv);
3437 if (err < 0) {
3438 dev_err(dev, "Error initializing ingress CGR\n");
3439 goto delete_egress_cgr;
3442 /* Add the FQs to the interface, and make them active */
3443 list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) {
3444 err = dpaa_fq_init(dpaa_fq, false);
3445 if (err < 0)
3446 goto free_dpaa_fqs;
3449 priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX);
3450 priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX);
3452 /* All real interfaces need their ports initialized */
3453 err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
3454 &priv->buf_layout[0], dev);
3455 if (err)
3456 goto free_dpaa_fqs;
3458 /* Rx traffic distribution based on keygen hashing defaults to on */
3459 priv->keygen_in_use = true;
3461 priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv);
3462 if (!priv->percpu_priv) {
3463 dev_err(dev, "devm_alloc_percpu() failed\n");
3464 err = -ENOMEM;
3465 goto free_dpaa_fqs;
3468 priv->num_tc = 1;
3469 netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
3471 /* Initialize NAPI */
3472 err = dpaa_napi_add(net_dev);
3473 if (err < 0)
3474 goto delete_dpaa_napi;
3476 err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout);
3477 if (err < 0)
3478 goto delete_dpaa_napi;
3480 dpaa_eth_sysfs_init(&net_dev->dev);
3482 netif_info(priv, probe, net_dev, "Probed interface %s\n",
3483 net_dev->name);
3485 return 0;
3487 delete_dpaa_napi:
3488 dpaa_napi_del(net_dev);
3489 free_dpaa_fqs:
3490 dpaa_fq_free(dev, &priv->dpaa_fq_list);
3491 qman_delete_cgr_safe(&priv->ingress_cgr);
3492 qman_release_cgrid(priv->ingress_cgr.cgrid);
3493 delete_egress_cgr:
3494 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3495 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3496 free_dpaa_bps:
3497 dpaa_bps_free(priv);
3498 free_netdev:
3499 dev_set_drvdata(dev, NULL);
3500 free_netdev(net_dev);
3502 return err;
3505 static int dpaa_remove(struct platform_device *pdev)
3507 struct net_device *net_dev;
3508 struct dpaa_priv *priv;
3509 struct device *dev;
3510 int err;
3512 dev = &pdev->dev;
3513 net_dev = dev_get_drvdata(dev);
3515 priv = netdev_priv(net_dev);
3517 dpaa_eth_sysfs_remove(dev);
3519 dev_set_drvdata(dev, NULL);
3520 unregister_netdev(net_dev);
3522 err = dpaa_fq_free(dev, &priv->dpaa_fq_list);
3524 qman_delete_cgr_safe(&priv->ingress_cgr);
3525 qman_release_cgrid(priv->ingress_cgr.cgrid);
3526 qman_delete_cgr_safe(&priv->cgr_data.cgr);
3527 qman_release_cgrid(priv->cgr_data.cgr.cgrid);
3529 dpaa_napi_del(net_dev);
3531 dpaa_bps_free(priv);
3533 free_netdev(net_dev);
3535 return err;
3538 static const struct platform_device_id dpaa_devtype[] = {
3540 .name = "dpaa-ethernet",
3541 .driver_data = 0,
3542 }, {
3545 MODULE_DEVICE_TABLE(platform, dpaa_devtype);
3547 static struct platform_driver dpaa_driver = {
3548 .driver = {
3549 .name = KBUILD_MODNAME,
3551 .id_table = dpaa_devtype,
3552 .probe = dpaa_eth_probe,
3553 .remove = dpaa_remove
3556 static int __init dpaa_load(void)
3558 int err;
3560 pr_debug("FSL DPAA Ethernet driver\n");
3562 /* initialize dpaa_eth mirror values */
3563 dpaa_rx_extra_headroom = fman_get_rx_extra_headroom();
3564 dpaa_max_frm = fman_get_max_frm();
3566 err = platform_driver_register(&dpaa_driver);
3567 if (err < 0)
3568 pr_err("Error, platform_driver_register() = %d\n", err);
3570 return err;
3572 module_init(dpaa_load);
3574 static void __exit dpaa_unload(void)
3576 platform_driver_unregister(&dpaa_driver);
3578 /* Only one channel is used and needs to be released after all
3579 * interfaces are removed
3581 dpaa_release_channel();
3583 module_exit(dpaa_unload);
3585 MODULE_LICENSE("Dual BSD/GPL");
3586 MODULE_DESCRIPTION("FSL DPAA Ethernet driver");