/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"
#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2
#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)
#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};
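/* How the three structures above relate (explanatory sketch added for
 * clarity, not from the original source):
 *
 *   netcp_device (one per "ti,netcp-1.0" platform device)
 *     +-- modpriv_head:   list of netcp_inst_modpriv, one per module
 *     |                   probed against this device
 *     +-- interface_head: list of netcp_intf (net_device instances)
 *           +-- module_head: list of netcp_intf_modpriv, one per module
 *                            attached to that interface
 *
 * module_priv in both *_modpriv structures is the opaque state a module's
 * probe()/attach() callback hands back to the core.
 */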
static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(u32 *buff, u32 *buff_len, u32 *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = desc->buff_len;
	*buff = desc->buff;
	*ndesc = desc->next_desc;
}
static void get_pad_info(u32 *pad0, u32 *pad1, struct knav_dma_desc *desc)
{
	*pad0 = desc->pad[0];
	*pad1 = desc->pad[1];
}
static void get_org_pkt_info(u32 *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = desc->orig_buff;
	*buff_len = desc->orig_len;
}
static void get_words(u32 *words, int num_words, u32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = desc[i];
}
static void set_pkt_info(u32 buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = buff_len;
	desc->buff = buff;
	desc->next_desc = ndesc;
}
static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = desc_info;
	desc->packet_info = pkt_info;
}
static void set_pad_info(u32 pad0, u32 pad1, struct knav_dma_desc *desc)
{
	desc->pad[0] = pad0;
	desc->pad[1] = pad1;
}
static void set_org_pkt_info(u32 buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = buff;
	desc->orig_len = buff_len;
}
static void set_words(u32 *words, int num_words, u32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = words[i];
}
/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}
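/* Worked example (illustrative values, not from the original source): if the
 * e-fuse holds addr0 = 0xd4bed9ab and addr1 = 0x00001234, the bytes unpack as
 *   x[0]=0x12 x[1]=0x34 (from addr1) and
 *   x[2]=0xd4 x[3]=0xbe x[4]=0xd9 x[5]=0xab (from addr0),
 * i.e. the MAC address 12:34:d4:be:d9:ab, independent of CPU endianness
 * because readl() already returns the register value as a CPU-order u32.
 */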
static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}
/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv)
		return -ENOMEM;

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered)
			ret = netcp_register_interface(netcp_intf);
		if (ret)
			return -ENODEV;

		if (!module->attach)
			continue;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     "netcp-interface", 0);

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}
	return 0;
}
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EEXIST;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}

	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
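/* Usage sketch (hypothetical names, for illustration only - the callbacks
 * below do not exist in this file): a NetCP module such as the GBE switch
 * driver registers itself like this, typically from its module_init():
 *
 *	static struct netcp_module my_module = {
 *		.name		= "netcp-gbe",
 *		.primary	= true,
 *		.probe		= my_probe,
 *		.attach		= my_attach,
 *		.release	= my_release,
 *		.remove		= my_remove,
 *	};
 *
 *	ret = netcp_register_module(&my_module);
 *
 * Registration immediately probes the module against every netcp_device
 * already on netcp_devices, so the load order of the core and its modules
 * is flexible.
 */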
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);
void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);
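/* Usage sketch (hypothetical names, for illustration only): a module's
 * attach() typically installs its hooks on the interface, with lower order
 * values running first:
 *
 *	static int my_tx_hook(int order, void *data, struct netcp_packet *p)
 *	{
 *		struct my_priv *priv = data;
 *
 *		p->tx_pipe = &priv->tx_pipe;	(claim the packet)
 *		return 0;
 *	}
 *	...
 *	netcp_register_txhook(netcp_priv, 10, my_tx_hook, priv);
 *
 * The ordered insert above (__list_add before the first entry with a larger
 * order) keeps the hook list sorted without a separate sort pass.
 */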
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);
int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		put_page(virt_to_head_page(ptr));
	else
		kfree(ptr);
}
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		get_pad_info((u32 *)&buf_ptr, &tmp, ndesc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	get_pad_info((u32 *)&buf_ptr, &buf_len, desc);
	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			netcp->ndev->stats.rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		netcp->ndev->stats.rx_dropped++;
	}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	get_pad_info((u32 *)&org_buf_ptr, &org_buf_len, desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz = desc->desc_info;
	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		get_pad_info((u32 *)&page, &tmp, ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%p), len(%d), page(%p)\n",
				(void *)dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Remove ethernet FCS from the packet */
	__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	p_info.rxtstamp_complete = false;
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			netcp->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return 0;
		}
	}

	netcp->ndev->stats.rx_packets++;
	netcp->ndev->stats.rx_bytes += skb->len;

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	netcp->ndev->stats.rx_errors++;
	return 0;
}
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;
	u32 tmp;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		get_pad_info((u32 *)&buf_ptr, &tmp, desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 pad[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		if (primary_buf_len <= PAGE_SIZE) {
			bufptr = netdev_alloc_frag(primary_buf_len);
			pad[1] = primary_buf_len;
		} else {
			bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
					 GFP_DMA32 | __GFP_COLD);
			/* record the length so the free path picks kfree() */
			pad[1] = primary_buf_len;
		}

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		pad[0] = (u32)bufptr;

	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		pad[0] = (u32)page;
		pad[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	set_pad_info(pad[0], pad[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	/* Push to FDQs */
	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
}
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]--)
			netcp_allocate_rx_buf(netcp, i);
	}
}
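/* Example of the deficit computation (illustrative numbers): with a
 * configured depth of rx_queue_depths[0] = 128 and 100 descriptors still
 * sitting in FDQ 0, knav_queue_get_count() returns 100, the deficit is
 * 128 - 100 = 28, and the loop above allocates and pushes 28 fresh
 * buffers/descriptors to bring the free-descriptor queue back to depth.
 */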
/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	if (packets < budget) {
		napi_complete(&netcp->rx_napi);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	netcp_rxpool_refill(netcp);
	return packets;
}
static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%p), len(%d)\n",
				 (void *)dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;
	u32 tmp;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		get_pad_info((u32 *)&skb, &tmp, desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}
static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(!dma_addr)) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (unlikely(IS_ERR_OR_NULL(desc))) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 pkt_info;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (unlikely(IS_ERR_OR_NULL(ndesc))) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool,
						      (void *)ndesc);
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		set_words(&desc_dma, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
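/* Resulting descriptor layout (explanatory sketch, not from the original
 * source): for an skb with a linear area and two page frags,
 * netcp_tx_map_skb() builds a host-descriptor chain
 *
 *   desc (linear, len = skb_headlen)
 *     -> ndesc1 (frag 0) -> ndesc2 (frag 1) -> 0
 *
 * linked through next_desc by each descriptor's pool DMA address. Only the
 * first descriptor carries the total packet length in desc_info, and it is
 * mapped later by netcp_tx_submit_skb() when the packet is queued.
 */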
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(u32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		u32 *psdata = p_info.psdata;

		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(psdata, p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	set_words((u32 *)&skb, 1, &desc->pad[0]);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words((u32 *)&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			ndev->stats.tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	ndev->trans_start = jiffies;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	ndev->stats.tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	char name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
				tx_pipe->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = -ENODEV;
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);
int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
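/* Lifecycle sketch (hypothetical names and numbers, for illustration only):
 * a module embeds a netcp_tx_pipe in its private state and drives it as
 *
 *	netcp_txpipe_init(&priv->tx_pipe, netcp_device, "nettx", 648);
 *	...
 *	netcp_txpipe_open(&priv->tx_pipe);	(at ndo_open time)
 *	...
 *	netcp_txpipe_close(&priv->tx_pipe);	(at ndo_stop time)
 *
 * The channel name and queue id here are placeholders; real values come from
 * the module's device-tree data.
 */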
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}
static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}
static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}
static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
		netcp_addr_del(netcp, naddr);
	}
}
static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);
		mutex_lock(&netcp_modules_lock);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
		mutex_unlock(&netcp_modules_lock);
	}
}
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
}
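/* The three-phase mark-and-sweep above keeps hardware address filters in
 * sync without diffing lists by hand: clear all flags, re-mark everything
 * the stack currently wants (existing entries get ADDR_VALID, brand-new
 * ones ADDR_NEW), then delete every entry left unmarked and push only
 * ADDR_NEW entries down to the modules. (Explanatory note, not from the
 * original source.)
 */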
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR_OR_NULL(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR_OR_NULL(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = -ENODEV;
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}
	mutex_unlock(&netcp_modules_lock);

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}
	mutex_unlock(&netcp_modules_lock);

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	mutex_unlock(&netcp_modules_lock);
	return (ret == 0) ? 0 : err;
}
static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* MTU < 68 is an error for IPv4 traffic */
	if ((new_mtu < 68) ||
	    (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
		dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	return 0;
}
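/* Worked bound (illustrative, assuming a jumbo-capable NETCP_MAX_FRAME_SIZE
 * of 9504 from netcp.h): the largest accepted MTU would be
 * 9504 - ETH_HLEN(14) - ETH_FCS_LEN(4) = 9486, i.e. the MTU excludes the
 * Ethernet header and FCS that frame the payload on the wire.
 */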
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	ndev->trans_start = jiffies;
	netif_tx_wake_all_queues(ndev);
}
static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}
static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	mutex_lock(&netcp_modules_lock);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	mutex_unlock(&netcp_modules_lock);
	return err;
}
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}
static int netcp_setup_tc(struct net_device *dev, u8 num_tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
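/* Example (illustrative): "tc qdisc add dev eth0 root mqprio num_tc 4 ..."
 * ends up here with num_tc = 4; each class i is mapped to exactly one
 * hardware queue i via netdev_set_tc_queue(dev, i, 1, i), and num_tc = 0
 * tears the mapping down again via netdev_reset_tc().
 */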
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl           = netcp_ndo_ioctl,
	.ndo_change_mtu		= netcp_ndo_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
					 netcp->rx_buffer_sizes,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret) {
		dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
		netcp->rx_buffer_sizes[0] = 1536;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least(%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}
*pdev
)
2036 struct device_node
*node
= pdev
->dev
.of_node
;
2037 struct netcp_intf
*netcp_intf
, *netcp_tmp
;
2038 struct device_node
*child
, *interfaces
;
2039 struct netcp_device
*netcp_device
;
2040 struct device
*dev
= &pdev
->dev
;
2041 struct netcp_module
*module
;
2045 dev_err(dev
, "could not find device info\n");
2049 /* Allocate a new NETCP device instance */
2050 netcp_device
= devm_kzalloc(dev
, sizeof(*netcp_device
), GFP_KERNEL
);
2054 pm_runtime_enable(&pdev
->dev
);
2055 ret
= pm_runtime_get_sync(&pdev
->dev
);
2057 dev_err(dev
, "Failed to enable NETCP power-domain\n");
2058 pm_runtime_disable(&pdev
->dev
);
2062 /* Initialize the NETCP device instance */
2063 INIT_LIST_HEAD(&netcp_device
->interface_head
);
2064 INIT_LIST_HEAD(&netcp_device
->modpriv_head
);
2065 netcp_device
->device
= dev
;
2066 platform_set_drvdata(pdev
, netcp_device
);
2068 /* create interfaces */
2069 interfaces
= of_get_child_by_name(node
, "netcp-interfaces");
2071 dev_err(dev
, "could not find netcp-interfaces node\n");
2076 for_each_available_child_of_node(interfaces
, child
) {
2077 ret
= netcp_create_interface(netcp_device
, child
);
2079 dev_err(dev
, "could not create interface(%s)\n",
2081 goto probe_quit_interface
;
2085 /* Add the device instance to the list */
2086 list_add_tail(&netcp_device
->device_list
, &netcp_devices
);
2088 /* Probe & attach any modules already registered */
2089 mutex_lock(&netcp_modules_lock
);
2090 for_each_netcp_module(module
) {
2091 ret
= netcp_module_probe(netcp_device
, module
);
2093 dev_err(dev
, "module(%s) probe failed\n", module
->name
);
2095 mutex_unlock(&netcp_modules_lock
);
2098 probe_quit_interface
:
2099 list_for_each_entry_safe(netcp_intf
, netcp_tmp
,
2100 &netcp_device
->interface_head
,
2102 netcp_delete_interface(netcp_device
, netcp_intf
->ndev
);
2106 pm_runtime_put_sync(&pdev
->dev
);
2107 pm_runtime_disable(&pdev
->dev
);
2108 platform_set_drvdata(pdev
, NULL
);
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);
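/* Device-tree sketch (abbreviated and illustrative - see the Keystone
 * Navigator/NetCP binding documents for the authoritative property list
 * consumed by netcp_create_interface() above):
 *
 *	netcp: netcp@2000000 {
 *		compatible = "ti,netcp-1.0";
 *		...
 *		netcp-devices { ... };
 *		netcp-interfaces {
 *			interface-0 {
 *				rx-channel = "netrx0";
 *				rx-pool = <1024 12>;
 *				tx-pool = <1024 12>;
 *				rx-queue-depth = <128 128 0 0>;
 *				rx-buffer-size = <1518 4096 0 0>;
 *				rx-queue = <8704>;
 *				tx-completion-queue = <8706>;
 *			};
 *		};
 *	};
 */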
static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");