/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"

#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)

/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");

/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

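/* Example (illustrative sketch): the sw_data accessors above stash
 * driver-private values in descriptor words the hardware never touches.
 * The driver uses them to carry a buffer or skb pointer across a hardware
 * round trip; as the warnings later in this file note, the pointer is
 * narrowed to 32 bits, so this only works on 32-bit machines:
 *
 *	SET_SW_DATA0((u32)skb, desc);                 before pushing to hw
 *	...
 *	skb = (struct sk_buff *)GET_SW_DATA0(desc);   after completion pop
 */
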
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}

/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}

static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name;
		char node_name[32];

		if (of_property_read_string(node, "label", &name) < 0) {
			snprintf(node_name, sizeof(node_name), "%pOFn", child);
			name = node_name;
		}
		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv)
		return -ENOMEM;

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}

int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);

static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);

static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}

static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}

static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}

/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	}
}

/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete_done(&netcp->rx_napi, packets);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}

static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}

static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}

static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}

static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}

/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}

int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	char name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						tx_pipe->dma_chan_name, &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);

static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}

static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}

static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}

static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[32];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}

static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}

static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}

static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
		rxpackets       = p->rx_packets;
		rxbytes         = p->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
		txpackets       = p->tx_packets;
		txbytes         = p->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}

static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};

static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			eth_random_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			eth_random_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least %ld\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}

static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	free_netdev(ndev);
}

static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%pOFn)\n",
				child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");