// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv,				\
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name;
		char node_name[32];

		if (of_property_read_string(child, "label", &name) < 0) {
			snprintf(node_name, sizeof(node_name), "%pOFn", child);
			name = node_name;
		}
		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

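/* A minimal sketch of a client module registration (illustrative only; the
 * callback fields come from struct netcp_module in netcp.h, but "my_probe",
 * "my_remove" and "my_attach" are hypothetical):
 *
 *	static struct netcp_module my_module = {
 *		.name	= "my-module",
 *		.owner	= THIS_MODULE,
 *		.probe	= my_probe,
 *		.remove	= my_remove,
 *		.attach	= my_attach,
 *	};
 *
 *	ret = netcp_register_module(&my_module);
 *
 * Registration probes the module against every NETCP device already on
 * netcp_devices; on failure it unwinds via netcp_unregister_module().
 */
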
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

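/* Usage sketch (illustrative): a module retrieves the per-interface state it
 * returned from its attach() callback; "my_module" and "my_intf_priv" are
 * hypothetical names:
 *
 *	struct my_intf_priv *priv =
 *		netcp_module_get_intf_data(&my_module, netcp);
 */
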
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

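/* A minimal sketch of a TX hook (illustrative; "my_tx_hook" and "my_priv"
 * are hypothetical). Hooks run in ascending 'order' on every transmitted
 * packet, and at least one of them must set p_info->tx_pipe to claim it:
 *
 *	static int my_tx_hook(int order, void *data,
 *			      struct netcp_packet *p_info)
 *	{
 *		struct my_priv *priv = data;
 *
 *		p_info->tx_pipe = &priv->tx_pipe;
 *		return 0;
 *	}
 *
 *	netcp_register_txhook(netcp_priv, 10, my_tx_hook, priv);
 */
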
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}

static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_skb;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_skb;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_skb:
	dev_kfree_skb(skb);

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	}
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}

static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = skb_frag_off(frag);
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}

static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

*tx_pipe
)
1318 if (tx_pipe
->dma_channel
) {
1319 knav_dma_close_channel(tx_pipe
->dma_channel
);
1320 tx_pipe
->dma_channel
= NULL
;
1324 EXPORT_SYMBOL_GPL(netcp_txpipe_close
);
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
				tx_pipe->dma_chan_name, &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
			name, tx_pipe->dma_queue);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

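/* Typical call sequence from a client module (a sketch only; the channel
 * name "nettx" and queue id 648 are illustrative values that would normally
 * come from the module's device-tree node):
 *
 *	netcp_txpipe_init(&tx_pipe, netcp_device, "nettx", 648);
 *	ret = netcp_txpipe_open(&tx_pipe);
 *
 * The pipe is then claimed per-packet from a TX hook via p_info->tx_pipe and
 * torn down with netcp_txpipe_close().
 */
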
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

*netcp_addr_add(struct netcp_intf
*netcp
,
1400 enum netcp_addr_type type
)
1402 struct netcp_addr
*naddr
;
1404 naddr
= devm_kmalloc(netcp
->dev
, sizeof(*naddr
), GFP_ATOMIC
);
1410 naddr
->netcp
= netcp
;
1412 ether_addr_copy(naddr
->addr
, addr
);
1414 eth_zero_addr(naddr
->addr
);
1415 list_add_tail(&naddr
->node
, &netcp
->addr_list
);
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[32];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
						netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
						netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}

static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

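/* Example (illustrative, not from the original source): mapping four traffic
 * classes one-to-one onto four TX queues with the mqprio qdisc, assuming the
 * interface is named eth0:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 queues 1@0 1@1 1@2 1@3 hw 1
 */
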
static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp_rx);
		rxpackets       = p->rx_packets;
		rxbytes         = p->rx_bytes;
	} while (u64_stats_fetch_retry(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin(&p->syncp_tx);
		txpackets       = p->tx_packets;
		txbytes         = p->tx_bytes;
	} while (u64_stats_fetch_retry(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_eth_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};

static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			eth_hw_addr_set(ndev, efuse_mac_addr);
		else
			eth_hw_addr_random(ndev);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		ret = of_get_ethdev_address(node_interface, ndev);
		if (ret)
			eth_hw_addr_random(ndev);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %u\n",
			(unsigned int)MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
	netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);

	/* Register the network device */
	ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
	ndev->netdev_ops = &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%pOFn)\n",
				child);
			of_node_put(child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static void netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");