/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"
#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2
#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
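
/* Three levels of bookkeeping are threaded through these lists: each
 * probed NETCP instance (struct netcp_device) carries its per-device
 * module state (struct netcp_inst_modpriv) on modpriv_head, and each
 * network interface (struct netcp_intf) carries its per-interface
 * module state (struct netcp_intf_modpriv) on module_head.
 */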
/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};
struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};
struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};
static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}
static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}
static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)
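
/* The sw_data[] words are software-only scratch space in the descriptor;
 * the hardware never interprets them, which is why the accessors above
 * skip endian conversion.  The driver stashes virtual buffer pointers
 * and lengths here, hence the 32-bit-only warnings further down.
 */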
static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}
static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}
static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}
static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}
static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}
static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}
/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}
static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}
/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
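
/* Editor's sketch (not from this file): a NetCP module such as the GBE
 * switch driver would typically register itself roughly like this --
 * the exact field set lives in netcp.h and the callback bodies here are
 * placeholders:
 *
 *	static struct netcp_module my_module = {
 *		.name	 = "netcp-gbe",
 *		.owner	 = THIS_MODULE,
 *		.primary = true,
 *		.probe	 = my_probe,
 *		.attach	 = my_attach,
 *		.release = my_release,
 *	};
 *
 *	netcp_register_module(&my_module);
 *
 * Registration either probes the module against every known NETCP
 * device immediately, or the probe defers cleanly by returning
 * NETCP_MOD_PROBE_SKIPPED.
 */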
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);
void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};
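
/* Hook lists are kept sorted by ascending 'order'.  On transmit, hooks
 * run in order and one of them must claim the packet by setting
 * p_info.tx_pipe; on receive, they run before the skb is handed to the
 * stack and a nonzero return drops the packet.
 */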
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_skb;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_skb;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_skb:
	dev_kfree_skb(skb);

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}
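
/* FDQ 0 feeds the primary buffers that back the skb head (sized for a
 * full Ethernet frame plus the SOP offset); the remaining FDQs feed
 * whole pages that the hardware chains on as page fragments for larger
 * packets.
 */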
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	}
}
/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}
static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}
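
/* Interrupt/NAPI handshake: the queue notifier fires once, the handler
 * masks further notifications and schedules NAPI; the poll routine
 * re-enables notifications only after it drains less than the full
 * budget.
 */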
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}
static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
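
/* TX flow control is based on free descriptors in the pool rather than
 * ring slots: the subqueue is stopped when fewer than tx_pause_threshold
 * descriptors remain (MAX_SKB_FRAGS by default, see
 * netcp_create_interface()) and woken again from the completion path
 * once the count exceeds tx_resume_threshold.
 */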
int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	char name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						     tx_pipe->dma_chan_name,
						     &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);
int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}
static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
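
/* Address filters are synchronized by mark-and-sweep: clear all marks,
 * re-mark every address still wanted (ADDR_VALID) or newly wanted
 * (ADDR_NEW), then sweep -- deleting unmarked entries and pushing new
 * ones out to the modules.  See netcp_set_rx_mode().
 */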
static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}
static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}
static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	spin_unlock(&netcp->lock);
}
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
						  netcp->dma_chan_name,
						  &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
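
/* At this point the Rx path is fully plumbed: descriptor pools, the Rx
 * completion queue, the free-descriptor queues and the DMA channel that
 * links them.  Notifications stay disabled until netcp_ndo_open() has
 * also brought up the modules and NAPI.
 */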
/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}
static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}
static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
		rxpackets       = p->rx_packets;
		rxbytes         = p->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
		txpackets       = p->tx_packets;
		txbytes         = p->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |=  NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least(%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll,
			  NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}
*pdev
)
2153 struct device_node
*node
= pdev
->dev
.of_node
;
2154 struct netcp_intf
*netcp_intf
, *netcp_tmp
;
2155 struct device_node
*child
, *interfaces
;
2156 struct netcp_device
*netcp_device
;
2157 struct device
*dev
= &pdev
->dev
;
2161 dev_err(dev
, "could not find device info\n");
2165 /* Allocate a new NETCP device instance */
2166 netcp_device
= devm_kzalloc(dev
, sizeof(*netcp_device
), GFP_KERNEL
);
2170 pm_runtime_enable(&pdev
->dev
);
2171 ret
= pm_runtime_get_sync(&pdev
->dev
);
2173 dev_err(dev
, "Failed to enable NETCP power-domain\n");
2174 pm_runtime_disable(&pdev
->dev
);
2178 /* Initialize the NETCP device instance */
2179 INIT_LIST_HEAD(&netcp_device
->interface_head
);
2180 INIT_LIST_HEAD(&netcp_device
->modpriv_head
);
2181 netcp_device
->device
= dev
;
2182 platform_set_drvdata(pdev
, netcp_device
);
2184 /* create interfaces */
2185 interfaces
= of_get_child_by_name(node
, "netcp-interfaces");
2187 dev_err(dev
, "could not find netcp-interfaces node\n");
2192 for_each_available_child_of_node(interfaces
, child
) {
2193 ret
= netcp_create_interface(netcp_device
, child
);
2195 dev_err(dev
, "could not create interface(%s)\n",
2197 goto probe_quit_interface
;
2201 of_node_put(interfaces
);
2203 /* Add the device instance to the list */
2204 list_add_tail(&netcp_device
->device_list
, &netcp_devices
);
2208 probe_quit_interface
:
2209 list_for_each_entry_safe(netcp_intf
, netcp_tmp
,
2210 &netcp_device
->interface_head
,
2212 netcp_delete_interface(netcp_device
, netcp_intf
->ndev
);
2215 of_node_put(interfaces
);
2218 pm_runtime_put_sync(&pdev
->dev
);
2219 pm_runtime_disable(&pdev
->dev
);
2220 platform_set_drvdata(pdev
, NULL
);
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);
static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");