/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"
#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2
#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv,		\
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};
static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}
static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}
static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)
static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}
static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}
static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}
static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}
static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
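/* By convention in this driver, sw_data[0] stashes the virtual address of
 * the attached buffer (or the skb on Tx) and sw_data[1] its length; the Rx
 * allocation and Tx completion paths below rely on this pairing. The
 * hardware never touches these words, hence the absence of endian swaps.
 */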
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}
static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}
/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}
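/* Worked example (illustrative register values, not real e-fuse contents):
 * with swap == 0, addr0 = 0x00112233 and addr1 = 0x00004455 produce the
 * MAC address 44:55:00:11:22:33 - the low 16 bits of addr1 followed by all
 * 32 bits of addr0.
 */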
static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}
/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
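/* Typical registration (sketch; the module and callback names below are
 * hypothetical): a NetCP module fills in a struct netcp_module whose .name
 * matches a child of the "netcp-devices" DT node and registers it from its
 * own init code:
 *
 *	static struct netcp_module my_module = {
 *		.name		= "my-netcp-module",
 *		.primary	= true,
 *		.probe		= my_probe,
 *		.attach		= my_attach,
 *	};
 *
 *	netcp_register_module(&my_module);
 */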
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);
void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);
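/* Hooks are kept sorted by ascending 'order' and run from the lowest order
 * to the highest; on transmit one of them must claim the packet by setting
 * p_info->tx_pipe (see netcp_tx_submit_skb()). Example call, assuming a
 * hypothetical my_tx_hook() with the netcp_hook_rtn signature:
 *
 *	netcp_register_txhook(netcp, 10, my_tx_hook, my_priv);
 */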
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_skb;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_skb;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_skb:
	dev_kfree_skb(skb);

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}
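/* FDQ 0 supplies the primary (start-of-packet) buffers, sized for a full
 * Ethernet frame plus the SOP offset; the remaining FDQs supply whole pages
 * that the hardware chains on as fragments for packets larger than the
 * primary buffer.
 */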
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	}
}
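/* Example: with rx_queue_depths[0] = 128 and 120 descriptors currently
 * sitting in FDQ 0, the deficit is 8, so eight fresh buffers are allocated
 * and pushed; refilling stops early if an allocation fails.
 */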
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}
static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}
static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
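/* The mapped packet is a descriptor chain: the head descriptor covers the
 * linear part of the skb and one pool descriptor per page fragment is
 * linked in through next_desc. Only the head descriptor is returned to the
 * caller; netcp_free_tx_desc_chain() walks the same linkage to undo it.
 */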
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
	       ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
	       KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

	return 0;

out:
	return ret;
}
/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
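/* Flow control: the subqueue is stopped once fewer than tx_pause_threshold
 * descriptors (MAX_SKB_FRAGS by default, set in netcp_create_interface())
 * remain in the Tx pool, and netcp_process_tx_compl_packets() wakes it
 * again once the pool count rises above tx_resume_threshold.
 */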
int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						tx_pipe->dma_chan_name, &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		ret = PTR_ERR(tx_pipe->dma_queue);
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);
int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
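/* Tx pipe lifecycle (sketch): a module embeds a struct netcp_tx_pipe,
 * initializes it once with netcp_txpipe_init(), opens the DMA channel and
 * transmit queue with netcp_txpipe_open() when the interface comes up, and
 * tears everything down again with netcp_txpipe_close().
 */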
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}
*netcp_addr_add(struct netcp_intf
*netcp
,
1415 enum netcp_addr_type type
)
1417 struct netcp_addr
*naddr
;
1419 naddr
= devm_kmalloc(netcp
->dev
, sizeof(*naddr
), GFP_ATOMIC
);
1425 naddr
->netcp
= netcp
;
1427 ether_addr_copy(naddr
->addr
, addr
);
1429 eth_zero_addr(naddr
->addr
);
1430 list_add_tail(&naddr
->node
, &netcp
->addr_list
);
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}
static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}
static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}
static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}
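/* Address filtering is mark-and-sweep: every flag is cleared, addresses
 * still present on the netdev are re-marked ADDR_VALID (or ADDR_NEW when
 * seen for the first time), then the two sweep passes delete unmarked
 * entries from the modules and push newly marked ones down to them.
 */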
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}
static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}
static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}
static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
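/* Example: an mqprio request with num_tc = 4 maps traffic class i to the
 * single Tx queue i for i = 0..3, provided the device actually exposes at
 * least four real Tx queues; otherwise the request is rejected with
 * -EINVAL above.
 */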
static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
		rxpackets	= p->rx_packets;
		rxbytes		= p->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
		txpackets	= p->tx_packets;
		txbytes		= p->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			eth_random_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			eth_random_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %ld\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		devm_kfree(netcp_device->device, intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");