/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"
#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		    NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		    NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		    NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		    NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		    NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		    NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		    NETIF_MSG_RX_STATUS)
#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv, \
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};
static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}
static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
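
/* Illustrative sketch (example only, compiled out): how a virtual pointer
 * is stashed in and later recovered from the hardware-ignored sw_data
 * words. The descriptor is assumed to come from a knav descriptor pool;
 * the (u32) casts are exactly why this scheme only works on 32-bit
 * machines, as the warnings further down repeatedly note.
 */
#if 0	/* example only */
static void example_sw_data_round_trip(struct knav_dma_desc *desc,
				       void *buf, u32 len)
{
	SET_SW_DATA0((u32)buf, desc);	/* stash virtual address */
	SET_SW_DATA1(len, desc);	/* stash buffer length */

	/* ... descriptor travels through the hardware untouched ... */

	buf = (void *)GET_SW_DATA0(desc);
	len = GET_SW_DATA1(desc);
}
#endif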
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}
/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}
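
/* Worked example (hypothetical e-fuse contents): with the two registers
 * reading addr0 = 0x0017eaf4 and addr1 = 0x00005c26 and no swap, the byte
 * extraction above yields the MAC address 5c:26:00:17:ea:f4 -- addr1
 * supplies the two most significant bytes, addr0 the remaining four.
 */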
static const char *netcp_node_name(struct device_node *node)
{
	const char *name;

	if (of_property_read_string(node, "label", &name) < 0)
		name = node->name;
	if (!name)
		name = "unknown";
	return name;
}
/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name = netcp_node_name(child);

		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv)
		return -ENOMEM;

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			continue;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			return -EBUSY;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
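
/* Illustrative sketch (example only, compiled out): the minimal shape of a
 * client module using this registration API. The netcp_module fields and
 * callback signatures referenced here (name, primary, probe, attach) are
 * the ones this file itself dereferences; everything named example_* is
 * hypothetical.
 */
#if 0	/* example only */
static int example_probe(struct netcp_device *netcp_device,
			 struct device *dev, struct device_node *node,
			 void **inst_priv)
{
	/* allocate per-device state and return it via *inst_priv */
	return 0;
}

static int example_attach(void *inst_priv, struct net_device *ndev,
			  struct device_node *node, void **intf_priv)
{
	/* allocate per-interface state and return it via *intf_priv */
	return 0;
}

static struct netcp_module example_module = {
	.name		= "netcp-example",
	.primary	= true,	/* interfaces register once a primary probes */
	.probe		= example_probe,
	.attach		= example_attach,
};

static int __init example_init(void)
{
	return netcp_register_module(&example_module);
}
#endif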
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}
void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);
void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};

int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);

int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
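
/* Illustrative sketch (example only, compiled out): a TX hook as a client
 * module would install it. Hooks run in ascending 'order' on every packet;
 * a TX hook claims the packet by setting p_info->tx_pipe, which is exactly
 * what netcp_tx_submit_skb() below checks for. The example_* names and the
 * order value 10 are hypothetical.
 */
#if 0	/* example only */
struct example_priv {
	struct netcp_tx_pipe tx_pipe;
};

static int example_tx_hook(int order, void *data, struct netcp_packet *p_info)
{
	struct example_priv *priv = data;	/* hook_data from registration */

	p_info->tx_pipe = &priv->tx_pipe;	/* claim the packet */
	return 0;				/* non-zero would reject it */
}

static int example_attach_hooks(struct netcp_intf *netcp_priv,
				struct example_priv *priv)
{
	/* typically called from the module's attach() callback */
	return netcp_register_txhook(netcp_priv, 10, example_tx_hook, priv);
}
#endif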
int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order     == order) &&
		    (next->hook_rtn  == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}
static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}
static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (; ;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			netcp->ndev->stats.rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		netcp->ndev->stats.rx_dropped++;
	}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_desc;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(desc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_desc;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Remove ethernet FCS from the packet */
	__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	p_info.rxtstamp_complete = false;
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			netcp->ndev->stats.rx_errors++;
			dev_kfree_skb(skb);
			return 0;
		}
	}

	netcp->ndev->stats.rx_packets++;
	netcp->ndev->stats.rx_bytes += skb->len;

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	netcp->ndev->stats.rx_errors++;
	return 0;
}
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}
/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}
static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}
static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	}
}
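
/* Worked example (hypothetical numbers): with rx_queue_depths[0] = 128
 * configured and knav_queue_get_count() reporting 110 descriptors still
 * sitting in FDQ 0, the deficit is 18, so the loop above allocates and
 * pushes 18 fresh buffers (stopping early if an allocation fails).
 */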
/* NAPI poll */
static int netcp_rx_poll(struct napi_struct *napi, int budget)
{
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						rx_napi);
	unsigned int packets;

	packets = netcp_process_rx_packets(netcp, budget);

	netcp_rxpool_refill(netcp);
	if (packets < budget) {
		napi_complete(&netcp->rx_napi);
		knav_queue_enable_notify(netcp->rx_queue);
	}

	return packets;
}

static void netcp_rx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->rx_queue);
	napi_schedule(&netcp->rx_napi);
}
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}
static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct knav_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			netcp->ndev->stats.tx_errors++;
			continue;
		}

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		netcp->ndev->stats.tx_packets++;
		netcp->ndev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
static int netcp_tx_poll(struct napi_struct *napi, int budget)
{
	int packets;
	struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
						tx_napi);

	packets = netcp_process_tx_compl_packets(netcp, budget);
	if (packets < budget) {
		napi_complete(&netcp->tx_napi);
		knav_queue_enable_notify(netcp->tx_compl_q);
	}

	return packets;
}

static void netcp_tx_notify(void *arg)
{
	struct netcp_intf *netcp = arg;

	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_schedule(&netcp->tx_napi);
}
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	u32 pkt_info;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = frag->page_offset;
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		pkt_info =
			(netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
				KNAV_DMA_DESC_RETQ_SHIFT;
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp_complete = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		memmove(p_info.psdata, p_info.psdata + p_info.psdata_len,
			p_info.psdata_len);
		set_words(p_info.psdata, p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			ndev->stats.tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	ndev->trans_start = jiffies;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	ndev->stats.tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);
int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	char name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
						     tx_pipe->dma_chan_name,
						     &config);
	if (IS_ERR_OR_NULL(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = -ENODEV;
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);
int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
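
/* Illustrative sketch (example only, compiled out): the expected life cycle
 * of a TX pipe from a client module. The channel name "netcp-tx" and queue
 * id 648 are hypothetical; real values come from the module's device-tree
 * bindings.
 */
#if 0	/* example only */
static int example_setup_tx_pipe(struct netcp_tx_pipe *tx_pipe,
				 struct netcp_device *netcp_device)
{
	int ret;

	/* record channel name and queue id; no resources claimed yet */
	ret = netcp_txpipe_init(tx_pipe, netcp_device, "netcp-tx", 648);
	if (ret)
		return ret;

	/* open the DMA channel and queue; undone by netcp_txpipe_close() */
	return netcp_txpipe_open(tx_pipe);
}
#endif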
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}
static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}
static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	spin_unlock(&netcp->lock);
}
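
/* Worked example (hypothetical state): suppose the hardware currently knows
 * address A and the stack now reports addresses A and B. The pass above
 * clears all marks, marks A as ADDR_VALID (still present) and creates B as
 * ADDR_NEW; the sweeps then delete nothing (A is valid) and call each
 * module's add_addr() only for B.
 */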
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}
static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	char name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR_OR_NULL(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
						  netcp->dma_chan_name,
						  &config);
	if (IS_ERR_OR_NULL(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = -ENODEV;
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}
static int netcp_ndo_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct netcp_intf *netcp = netdev_priv(ndev);

	/* MTU < 68 is an error for IPv4 traffic */
	if ((new_mtu < 68) ||
	    (new_mtu > (NETCP_MAX_FRAME_SIZE - ETH_HLEN - ETH_FCS_LEN))) {
		dev_err(netcp->ndev_dev, "Invalid mtu size = %d\n", new_mtu);
		return -EINVAL;
	}

	ndev->mtu = new_mtu;
	return 0;
}
static void netcp_ndo_tx_timeout(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	ndev->trans_start = jiffies;
	netif_tx_wake_all_queues(ndev);
}
static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}
static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb,
			      void *accel_priv,
			      select_queue_fallback_t fallback)
{
	return 0;
}

static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
			  struct tc_to_netdev *tc)
{
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < tc->tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (tc->tc) {
		netdev_set_num_tc(dev, tc->tc);
		for (i = 0; i < tc->tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl           = netcp_ndo_ioctl,
	.ndo_change_mtu		= netcp_ndo_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= netcp_select_queue,
	.ndo_setup_tc		= netcp_setup_tc,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap_nocache(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			random_ether_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (mac_addr)
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			random_ether_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be atleast(%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
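
/* Illustrative sketch (example only): the shape of a device-tree interface
 * node consumed above. The property names match the of_property_read_*()
 * calls in netcp_create_interface(); all values shown are hypothetical.
 *
 *	interface-0 {
 *		rx-channel = "netrx0";
 *		rx-pool = <1024 12>;		// size, region-id
 *		tx-pool = <1024 12>;
 *		rx-queue-depth = <128 128 0 0>;
 *		rx-queue = <8704>;
 *		tx-completion-queue = <8706>;
 *	};
 */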
static void netcp_delete_interface(struct netcp_device *netcp_device,
				   struct net_device *ndev)
{
	struct netcp_intf_modpriv *intf_modpriv, *tmp;
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_module *module;

	dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
		ndev->name);

	/* Notify each of the modules that the interface is going away */
	list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
				 intf_list) {
		module = intf_modpriv->netcp_module;
		dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
			module->name);
		if (module->release)
			module->release(intf_modpriv->module_priv);
		list_del(&intf_modpriv->intf_list);
		kfree(intf_modpriv);
	}
	WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
	     ndev->name);

	list_del(&netcp->interface_list);

	of_node_put(netcp->node_interface);
	unregister_netdev(ndev);
	netif_napi_del(&netcp->rx_napi);
	free_netdev(ndev);
}
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	int ret;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%s)\n",
				child->name);
			goto probe_quit_interface;
		}
	}

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list)
		netcp_delete_interface(netcp_device, netcp_intf->ndev);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
		kfree(inst_modpriv);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list)
		netcp_delete_interface(netcp_device, netcp_intf->ndev);

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");