// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone NetCP Core driver
 *
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Sandeep Nair <sandeep_n@ti.com>
 *		Sandeep Paulraj <s-paulraj@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *		Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Murali Karicheri <m-karicheri2@ti.com>
 *		Wingman Kwok <w-kwok2@ti.com>
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_qmss.h>
#include <linux/soc/ti/knav_dma.h>

#include "netcp.h"
#define NETCP_SOP_OFFSET	(NET_IP_ALIGN + NET_SKB_PAD)
#define NETCP_NAPI_WEIGHT	64
#define NETCP_TX_TIMEOUT	(5 * HZ)
#define NETCP_PACKET_SIZE	(ETH_FRAME_LEN + ETH_FCS_LEN)
#define NETCP_MIN_PACKET_SIZE	ETH_ZLEN
#define NETCP_MAX_MCAST_ADDR	16

#define NETCP_EFUSE_REG_INDEX	0

#define NETCP_MOD_PROBE_SKIPPED	1
#define NETCP_MOD_PROBE_FAILED	2

#define NETCP_DEBUG (NETIF_MSG_HW	| NETIF_MSG_WOL		|	\
		     NETIF_MSG_DRV	| NETIF_MSG_LINK	|	\
		     NETIF_MSG_IFUP	| NETIF_MSG_INTR	|	\
		     NETIF_MSG_PROBE	| NETIF_MSG_TIMER	|	\
		     NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	|	\
		     NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	|	\
		     NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	|	\
		     NETIF_MSG_RX_STATUS)

#define NETCP_EFUSE_ADDR_SWAP	2

#define knav_queue_get_id(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_ID, (unsigned long)NULL)

#define knav_queue_enable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_ENABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_disable_notify(q) knav_queue_device_control(q,	\
					KNAV_QUEUE_DISABLE_NOTIFY,	\
					(unsigned long)NULL)

#define knav_queue_get_count(q)	knav_queue_device_control(q, \
				KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)

#define for_each_netcp_module(module)			\
	list_for_each_entry(module, &netcp_modules, module_list)

#define for_each_netcp_device_module(netcp_device, inst_modpriv)	\
	list_for_each_entry(inst_modpriv,		\
			    &((netcp_device)->modpriv_head), inst_list)

#define for_each_module(netcp, intf_modpriv)			\
	list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
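
/* NetCP is organized on three levels: a struct netcp_device per
 * hardware instance, a struct netcp_module per feature module (such as
 * the GBE switch or PA modules) registered with the core, and a
 * per-interface attachment that binds a module to a network device.
 * The modpriv structures below carry a module's private state at the
 * instance level and at the interface level respectively.
 */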
/* Module management structures */
struct netcp_device {
	struct list_head	device_list;
	struct list_head	interface_head;
	struct list_head	modpriv_head;
	struct device		*device;
};

struct netcp_inst_modpriv {
	struct netcp_device	*netcp_device;
	struct netcp_module	*netcp_module;
	struct list_head	inst_list;
	void			*module_priv;
};

struct netcp_intf_modpriv {
	struct netcp_intf	*netcp_priv;
	struct netcp_module	*netcp_module;
	struct list_head	intf_list;
	void			*module_priv;
};

struct netcp_tx_cb {
	void	*ts_context;
	void	(*txtstamp)(void *context, struct sk_buff *skb);
};

static LIST_HEAD(netcp_devices);
static LIST_HEAD(netcp_modules);
static DEFINE_MUTEX(netcp_modules_lock);

static int netcp_debug_level = -1;
module_param(netcp_debug_level, int, 0);
MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
/* Helper functions - Get/Set */
static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
			 struct knav_dma_desc *desc)
{
	*buff_len = le32_to_cpu(desc->buff_len);
	*buff = le32_to_cpu(desc->buff);
	*ndesc = le32_to_cpu(desc->next_desc);
}

static void get_desc_info(u32 *desc_info, u32 *pkt_info,
			  struct knav_dma_desc *desc)
{
	*desc_info = le32_to_cpu(desc->desc_info);
	*pkt_info = le32_to_cpu(desc->packet_info);
}

static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	return desc->sw_data[index];
}

/* use these macros to get sw data */
#define GET_SW_DATA0(desc) get_sw_data(0, desc)
#define GET_SW_DATA1(desc) get_sw_data(1, desc)
#define GET_SW_DATA2(desc) get_sw_data(2, desc)
#define GET_SW_DATA3(desc) get_sw_data(3, desc)

static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
			     struct knav_dma_desc *desc)
{
	*buff = le32_to_cpu(desc->orig_buff);
	*buff_len = le32_to_cpu(desc->orig_len);
}

static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		words[i] = le32_to_cpu(desc[i]);
}

static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
			 struct knav_dma_desc *desc)
{
	desc->buff_len = cpu_to_le32(buff_len);
	desc->buff = cpu_to_le32(buff);
	desc->next_desc = cpu_to_le32(ndesc);
}

static void set_desc_info(u32 desc_info, u32 pkt_info,
			  struct knav_dma_desc *desc)
{
	desc->desc_info = cpu_to_le32(desc_info);
	desc->packet_info = cpu_to_le32(pkt_info);
}

static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
	/* No Endian conversion needed as this data is untouched by hw */
	desc->sw_data[index] = data;
}

/* use these macros to set sw data */
#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)

static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
			     struct knav_dma_desc *desc)
{
	desc->orig_buff = cpu_to_le32(buff);
	desc->orig_len = cpu_to_le32(buff_len);
}

static void set_words(u32 *words, int num_words, __le32 *desc)
{
	int i;

	for (i = 0; i < num_words; i++)
		desc[i] = cpu_to_le32(words[i]);
}
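
/* All descriptor words that the queue manager hardware reads are kept
 * little-endian in memory; the get_*()/set_*() helpers above do the
 * le32 conversion in one place so the rest of the driver works on
 * native-endian values. Only the sw_data[] scratch words, which the
 * hardware never interprets, are stored raw.
 */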
/* Read the e-fuse value as 32 bit values to be endian independent */
static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
{
	unsigned int addr0, addr1;

	addr1 = readl(efuse_mac + 4);
	addr0 = readl(efuse_mac);

	switch (swap) {
	case NETCP_EFUSE_ADDR_SWAP:
		addr0 = addr1;
		addr1 = readl(efuse_mac);
		break;
	default:
		break;
	}

	x[0] = (addr1 & 0x0000ff00) >> 8;
	x[1] = addr1 & 0x000000ff;
	x[2] = (addr0 & 0xff000000) >> 24;
	x[3] = (addr0 & 0x00ff0000) >> 16;
	x[4] = (addr0 & 0x0000ff00) >> 8;
	x[5] = addr0 & 0x000000ff;

	return 0;
}

/* Module management routines */
static int netcp_register_interface(struct netcp_intf *netcp)
{
	int ret;

	ret = register_netdev(netcp->ndev);
	if (!ret)
		netcp->netdev_registered = true;
	return ret;
}
static int netcp_module_probe(struct netcp_device *netcp_device,
			      struct netcp_module *module)
{
	struct device *dev = netcp_device->device;
	struct device_node *devices, *interface, *node = dev->of_node;
	struct device_node *child;
	struct netcp_inst_modpriv *inst_modpriv;
	struct netcp_intf *netcp_intf;
	struct netcp_module *tmp;
	bool primary_module_registered = false;
	int ret;

	/* Find this module in the sub-tree for this device */
	devices = of_get_child_by_name(node, "netcp-devices");
	if (!devices) {
		dev_err(dev, "could not find netcp-devices node\n");
		return NETCP_MOD_PROBE_SKIPPED;
	}

	for_each_available_child_of_node(devices, child) {
		const char *name;
		char node_name[32];

		if (of_property_read_string(child, "label", &name) < 0) {
			snprintf(node_name, sizeof(node_name), "%pOFn", child);
			name = node_name;
		}
		if (!strcasecmp(module->name, name))
			break;
	}

	of_node_put(devices);
	/* If module not used for this device, skip it */
	if (!child) {
		dev_warn(dev, "module(%s) not used for device\n", module->name);
		return NETCP_MOD_PROBE_SKIPPED;
	}

	inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
	if (!inst_modpriv) {
		of_node_put(child);
		return -ENOMEM;
	}

	inst_modpriv->netcp_device = netcp_device;
	inst_modpriv->netcp_module = module;
	list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);

	ret = module->probe(netcp_device, dev, child,
			    &inst_modpriv->module_priv);
	of_node_put(child);
	if (ret) {
		dev_err(dev, "Probe of module(%s) failed with %d\n",
			module->name, ret);
		list_del(&inst_modpriv->inst_list);
		devm_kfree(dev, inst_modpriv);
		return NETCP_MOD_PROBE_FAILED;
	}

	/* Attach modules only if the primary module is probed */
	for_each_netcp_module(tmp) {
		if (tmp->primary)
			primary_module_registered = true;
	}

	if (!primary_module_registered)
		return 0;

	/* Attach module to interfaces */
	list_for_each_entry(netcp_intf, &netcp_device->interface_head,
			    interface_list) {
		struct netcp_intf_modpriv *intf_modpriv;

		intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
					    GFP_KERNEL);
		if (!intf_modpriv)
			return -ENOMEM;

		interface = of_parse_phandle(netcp_intf->node_interface,
					     module->name, 0);

		if (!interface) {
			devm_kfree(dev, intf_modpriv);
			continue;
		}

		intf_modpriv->netcp_priv = netcp_intf;
		intf_modpriv->netcp_module = module;
		list_add_tail(&intf_modpriv->intf_list,
			      &netcp_intf->module_head);

		ret = module->attach(inst_modpriv->module_priv,
				     netcp_intf->ndev, interface,
				     &intf_modpriv->module_priv);
		of_node_put(interface);
		if (ret) {
			dev_dbg(dev, "Attach of module %s declined with %d\n",
				module->name, ret);
			list_del(&intf_modpriv->intf_list);
			devm_kfree(dev, intf_modpriv);
			continue;
		}
	}

	/* Now register the interface with netdev */
	list_for_each_entry(netcp_intf,
			    &netcp_device->interface_head,
			    interface_list) {
		/* If interface not registered then register now */
		if (!netcp_intf->netdev_registered) {
			ret = netcp_register_interface(netcp_intf);
			if (ret)
				return -ENODEV;
		}
	}
	return 0;
}
int netcp_register_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *tmp;
	int ret;

	if (!module->name) {
		WARN(1, "error registering netcp module: no name\n");
		return -EINVAL;
	}

	if (!module->probe) {
		WARN(1, "error registering netcp module: no probe\n");
		return -EINVAL;
	}

	mutex_lock(&netcp_modules_lock);

	for_each_netcp_module(tmp) {
		if (!strcasecmp(tmp->name, module->name)) {
			mutex_unlock(&netcp_modules_lock);
			WARN(1, "module named %s already exists\n",
			     module->name);
			return -EINVAL;
		}
	}
	list_add_tail(&module->module_list, &netcp_modules);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			goto fail;
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

fail:
	mutex_unlock(&netcp_modules_lock);
	netcp_unregister_module(module);
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_register_module);
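
/* A feature module plugs into the core by filling in a struct
 * netcp_module and registering it. A minimal, illustrative sketch
 * (the module name must match a label under the "netcp-devices" DT
 * node; the callbacks shown are placeholders):
 *
 *	static struct netcp_module my_module = {
 *		.name		= "netcp-gbe",
 *		.owner		= THIS_MODULE,
 *		.primary	= true,
 *		.probe		= my_probe,
 *		.attach		= my_attach,
 *	};
 *	netcp_register_module(&my_module);
 *
 * Registration probes the module against every NetCP device already
 * present, and netcp_probe() replays it for devices that appear later.
 */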
static void netcp_release_module(struct netcp_device *netcp_device,
				 struct netcp_module *module)
{
	struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device *dev = netcp_device->device;

	/* Release the module from each interface */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;

		list_for_each_entry_safe(intf_modpriv, intf_tmp,
					 &netcp_intf->module_head,
					 intf_list) {
			if (intf_modpriv->netcp_module == module) {
				module->release(intf_modpriv->module_priv);
				list_del(&intf_modpriv->intf_list);
				devm_kfree(dev, intf_modpriv);
				break;
			}
		}
	}

	/* Remove the module from each instance */
	list_for_each_entry_safe(inst_modpriv, inst_tmp,
				 &netcp_device->modpriv_head, inst_list) {
		if (inst_modpriv->netcp_module == module) {
			module->remove(netcp_device,
				       inst_modpriv->module_priv);
			list_del(&inst_modpriv->inst_list);
			devm_kfree(dev, inst_modpriv);
			break;
		}
	}
}

void netcp_unregister_module(struct netcp_module *module)
{
	struct netcp_device *netcp_device;
	struct netcp_module *module_tmp;

	mutex_lock(&netcp_modules_lock);

	list_for_each_entry(netcp_device, &netcp_devices, device_list) {
		netcp_release_module(netcp_device, module);
	}

	/* Remove the module from the module list */
	for_each_netcp_module(module_tmp) {
		if (module == module_tmp) {
			list_del(&module->module_list);
			break;
		}
	}

	mutex_unlock(&netcp_modules_lock);
}
EXPORT_SYMBOL_GPL(netcp_unregister_module);

void *netcp_module_get_intf_data(struct netcp_module *module,
				 struct netcp_intf *intf)
{
	struct netcp_intf_modpriv *intf_modpriv;

	list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
		if (intf_modpriv->netcp_module == module)
			return intf_modpriv->module_priv;
	return NULL;
}
EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);

/* Module TX and RX Hook management */
struct netcp_hook_list {
	struct list_head	 list;
	netcp_hook_rtn		*hook_rtn;
	void			*hook_data;
	int			 order;
};
int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_txhook);
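
/* Hooks are kept sorted by ascending order value: the insertion above
 * walks the list and places a new entry before the first hook with a
 * higher order, so a hook registered with order 10 always runs before
 * one registered with order 20. The same ordering rule applies to the
 * Rx hooks below.
 */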
int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_txhook);

int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
			  netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *entry;
	struct netcp_hook_list *next;
	unsigned long flags;

	entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->hook_rtn  = hook_rtn;
	entry->hook_data = hook_data;
	entry->order     = order;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
		if (next->order > order)
			break;
	}
	__list_add(&entry->list, next->list.prev, &next->list);
	spin_unlock_irqrestore(&netcp_priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(netcp_register_rxhook);

int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
			    netcp_hook_rtn *hook_rtn, void *hook_data)
{
	struct netcp_hook_list *next, *n;
	unsigned long flags;

	spin_lock_irqsave(&netcp_priv->lock, flags);
	list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
		if ((next->order == order) &&
		    (next->hook_rtn == hook_rtn) &&
		    (next->hook_data == hook_data)) {
			list_del(&next->list);
			spin_unlock_irqrestore(&netcp_priv->lock, flags);
			devm_kfree(netcp_priv->dev, next);
			return 0;
		}
	}
	spin_unlock_irqrestore(&netcp_priv->lock, flags);
	return -ENOENT;
}
EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
static void netcp_frag_free(bool is_frag, void *ptr)
{
	if (is_frag)
		skb_free_frag(ptr);
	else
		kfree(ptr);
}

static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc)
{
	struct knav_dma_desc *ndesc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len, dma_sz = sizeof(*ndesc);
	void *buf_ptr;
	u32 tmp;

	get_words(&dma_desc, 1, &desc->next_desc);

	while (dma_desc) {
		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			break;
		}
		get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(ndesc);
		buf_len = (int)GET_SW_DATA1(desc);
		dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buf_ptr);
		knav_pool_desc_put(netcp->rx_pool, desc);
	}
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	buf_ptr = (void *)GET_SW_DATA0(desc);
	buf_len = (int)GET_SW_DATA1(desc);

	if (buf_ptr)
		netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
	knav_pool_desc_put(netcp->rx_pool, desc);
}

static void netcp_empty_rx_queue(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	unsigned int dma_sz;
	dma_addr_t dma;

	for (;;) {
		dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
		if (!dma)
			break;

		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
				__func__);
			rx_stats->rx_errors++;
			continue;
		}
		netcp_free_rx_desc_chain(netcp, desc);
		rx_stats->rx_dropped++;
	}
}
static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
{
	struct netcp_stats *rx_stats = &netcp->stats;
	unsigned int dma_sz, buf_len, org_buf_len;
	struct knav_dma_desc *desc, *ndesc;
	unsigned int pkt_sz = 0, accum_sz;
	struct netcp_hook_list *rx_hook;
	dma_addr_t dma_desc, dma_buff;
	struct netcp_packet p_info;
	struct sk_buff *skb;
	void *org_buf_ptr;
	u32 tmp;

	dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
	if (!dma_desc)
		return -1;

	desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
	if (unlikely(!desc)) {
		dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
		return 0;
	}

	get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
	/* warning!!!! We are retrieving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	org_buf_ptr = (void *)GET_SW_DATA0(desc);
	org_buf_len = (int)GET_SW_DATA1(desc);

	if (unlikely(!org_buf_ptr)) {
		dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
		goto free_desc;
	}

	pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
	accum_sz = buf_len;
	dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);

	/* Build a new sk_buff for the primary buffer */
	skb = build_skb(org_buf_ptr, org_buf_len);
	if (unlikely(!skb)) {
		dev_err(netcp->ndev_dev, "build_skb() failed\n");
		goto free_desc;
	}

	/* update data, tail and len */
	skb_reserve(skb, NETCP_SOP_OFFSET);
	__skb_put(skb, buf_len);

	/* Fill in the page fragment list */
	while (dma_desc) {
		struct page *page;

		ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
		if (unlikely(!ndesc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			goto free_skb;
		}

		get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		page = (struct page *)GET_SW_DATA0(ndesc);

		if (likely(dma_buff && buf_len && page)) {
			dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
				       DMA_FROM_DEVICE);
		} else {
			dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
				&dma_buff, buf_len, page);
			goto free_skb;
		}

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
				offset_in_page(dma_buff), buf_len, PAGE_SIZE);
		accum_sz += buf_len;

		/* Free the descriptor */
		knav_pool_desc_put(netcp->rx_pool, ndesc);
	}

	/* check for packet len and warn */
	if (unlikely(pkt_sz != accum_sz))
		dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
			pkt_sz, accum_sz);

	/* Newer version of the Ethernet switch can trim the Ethernet FCS
	 * from the packet and is indicated in hw_cap. So trim it only for
	 * older h/w
	 */
	if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
		__pskb_trim(skb, skb->len - ETH_FCS_LEN);

	/* Call each of the RX hooks */
	p_info.skb = skb;
	skb->dev = netcp->ndev;
	p_info.rxtstamp_complete = false;
	get_desc_info(&tmp, &p_info.eflags, desc);
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
			 KNAV_DMA_DESC_EFLAGS_MASK);
	list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
		int ret;

		ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
					&p_info);
		if (unlikely(ret)) {
			dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
				rx_hook->order, ret);
			/* Free the primary descriptor */
			rx_stats->rx_dropped++;
			knav_pool_desc_put(netcp->rx_pool, desc);
			dev_kfree_skb(skb);
			return 0;
		}
	}
	/* Free the primary descriptor */
	knav_pool_desc_put(netcp->rx_pool, desc);

	u64_stats_update_begin(&rx_stats->syncp_rx);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	u64_stats_update_end(&rx_stats->syncp_rx);

	/* push skb up the stack */
	skb->protocol = eth_type_trans(skb, netcp->ndev);
	netif_receive_skb(skb);
	return 0;

free_skb:
	dev_kfree_skb(skb);

free_desc:
	netcp_free_rx_desc_chain(netcp, desc);
	rx_stats->rx_errors++;
	return 0;
}
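
/* Receive path summary: the completion queue hands back a host
 * descriptor chain; the first descriptor carries the primary fragment,
 * turned into the skb head via build_skb(), and each linked descriptor
 * contributes one page fragment via skb_add_rx_frag(). Every Rx hook
 * then gets a look at the packet before it is passed to
 * netif_receive_skb(); a non-zero hook return drops the packet.
 */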
static int netcp_process_rx_packets(struct netcp_intf *netcp,
				    unsigned int budget)
{
	int i;

	for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
		;
	return i;
}

/* Release descriptors and attached buffers from Rx FDQ */
static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *desc;
	unsigned int buf_len, dma_sz;
	dma_addr_t dma;
	void *buf_ptr;

	/* Allocate descriptor */
	while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
		desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
			continue;
		}

		get_org_pkt_info(&dma, &buf_len, desc);
		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		buf_ptr = (void *)GET_SW_DATA0(desc);

		if (unlikely(!dma)) {
			dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (unlikely(!buf_ptr)) {
			dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
			knav_pool_desc_put(netcp->rx_pool, desc);
			continue;
		}

		if (fdq == 0) {
			dma_unmap_single(netcp->dev, dma, buf_len,
					 DMA_FROM_DEVICE);
			netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
		} else {
			dma_unmap_page(netcp->dev, dma, buf_len,
				       DMA_FROM_DEVICE);
			__free_page(buf_ptr);
		}

		knav_pool_desc_put(netcp->rx_pool, desc);
	}
}

static void netcp_rxpool_free(struct netcp_intf *netcp)
{
	int i;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
		netcp_free_rx_buf(netcp, i);

	if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
		dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
			netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));

	knav_pool_destroy(netcp->rx_pool);
	netcp->rx_pool = NULL;
}

static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
	struct knav_dma_desc *hwdesc;
	unsigned int buf_len, dma_sz;
	u32 desc_info, pkt_info;
	struct page *page;
	dma_addr_t dma;
	void *bufptr;
	u32 sw_data[2];

	/* Allocate descriptor */
	hwdesc = knav_pool_desc_get(netcp->rx_pool);
	if (IS_ERR_OR_NULL(hwdesc)) {
		dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
		return -ENOMEM;
	}

	if (likely(fdq == 0)) {
		unsigned int primary_buf_len;
		/* Allocate a primary receive queue entry */
		buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
		primary_buf_len = SKB_DATA_ALIGN(buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		bufptr = netdev_alloc_frag(primary_buf_len);
		sw_data[1] = primary_buf_len;

		if (unlikely(!bufptr)) {
			dev_warn_ratelimited(netcp->ndev_dev,
					     "Primary RX buffer alloc failed\n");
			goto fail;
		}
		dma = dma_map_single(netcp->dev, bufptr, buf_len,
				     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(netcp->dev, dma)))
			goto fail;

		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)bufptr;
	} else {
		/* Allocate a secondary receive queue entry */
		page = alloc_page(GFP_ATOMIC | GFP_DMA);
		if (unlikely(!page)) {
			dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
			goto fail;
		}
		buf_len = PAGE_SIZE;
		dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
		/* warning!!!! We are saving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		sw_data[0] = (u32)page;
		sw_data[1] = 0;
	}

	desc_info =  KNAV_DMA_DESC_PS_INFO_IN_DESC;
	desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
	pkt_info =  KNAV_DMA_DESC_HAS_EPIB;
	pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
	pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
		    KNAV_DMA_DESC_RETQ_SHIFT;
	set_org_pkt_info(dma, buf_len, hwdesc);
	SET_SW_DATA0(sw_data[0], hwdesc);
	SET_SW_DATA1(sw_data[1], hwdesc);
	set_desc_info(desc_info, pkt_info, hwdesc);

	knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
			   &dma_sz);
	knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
	return 0;

fail:
	knav_pool_desc_put(netcp->rx_pool, hwdesc);
	return -ENOMEM;
}
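
/* Rx buffers use a two-level scheme: FDQ 0 supplies the primary
 * buffers, sized for a full Ethernet frame plus NETCP_SOP_OFFSET
 * headroom and allocated with netdev_alloc_frag(), while the remaining
 * FDQs supply whole pages for scatter fragments. The virtual buffer
 * pointer and length ride along in the descriptor's sw_data[] words,
 * which only works on 32-bit machines, as the warnings above note.
 */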
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
	u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
	int i, ret = 0;

	/* Calculate the FDQ deficit and refill */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
		fdq_deficit[i] = netcp->rx_queue_depths[i] -
				 knav_queue_get_count(netcp->rx_fdq[i]);

		while (fdq_deficit[i]-- && !ret)
			ret = netcp_allocate_rx_buf(netcp, i);
	} /* end for fdqs */
}
*napi
, int budget
)
950 struct netcp_intf
*netcp
= container_of(napi
, struct netcp_intf
,
952 unsigned int packets
;
954 packets
= netcp_process_rx_packets(netcp
, budget
);
956 netcp_rxpool_refill(netcp
);
957 if (packets
< budget
) {
958 napi_complete_done(&netcp
->rx_napi
, packets
);
959 knav_queue_enable_notify(netcp
->rx_queue
);
965 static void netcp_rx_notify(void *arg
)
967 struct netcp_intf
*netcp
= arg
;
969 knav_queue_disable_notify(netcp
->rx_queue
);
970 napi_schedule(&netcp
->rx_napi
);
static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
				     struct knav_dma_desc *desc,
				     unsigned int desc_sz)
{
	struct knav_dma_desc *ndesc = desc;
	dma_addr_t dma_desc, dma_buf;
	unsigned int buf_len;

	while (ndesc) {
		get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);

		if (dma_buf && buf_len)
			dma_unmap_single(netcp->dev, dma_buf, buf_len,
					 DMA_TO_DEVICE);
		else
			dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
				 &dma_buf, buf_len);

		knav_pool_desc_put(netcp->tx_pool, ndesc);
		ndesc = NULL;
		if (dma_desc) {
			ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
						     desc_sz);
			if (!ndesc)
				dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
		}
	}
}

static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
					  unsigned int budget)
{
	struct netcp_stats *tx_stats = &netcp->stats;
	struct knav_dma_desc *desc;
	struct netcp_tx_cb *tx_cb;
	struct sk_buff *skb;
	unsigned int dma_sz;
	dma_addr_t dma;
	int pkts = 0;

	while (budget--) {
		dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
		if (!dma)
			break;
		desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
		if (unlikely(!desc)) {
			dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		/* warning!!!! We are retrieving the virtual ptr in the sw_data
		 * field as a 32bit value. Will not work on 64bit machines
		 */
		skb = (struct sk_buff *)GET_SW_DATA0(desc);
		netcp_free_tx_desc_chain(netcp, desc, dma_sz);
		if (!skb) {
			dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
			tx_stats->tx_errors++;
			continue;
		}

		tx_cb = (struct netcp_tx_cb *)skb->cb;
		if (tx_cb->txtstamp)
			tx_cb->txtstamp(tx_cb->ts_context, skb);

		if (netif_subqueue_stopped(netcp->ndev, skb) &&
		    netif_running(netcp->ndev) &&
		    (knav_pool_count(netcp->tx_pool) >
		    netcp->tx_resume_threshold)) {
			u16 subqueue = skb_get_queue_mapping(skb);

			netif_wake_subqueue(netcp->ndev, subqueue);
		}

		u64_stats_update_begin(&tx_stats->syncp_tx);
		tx_stats->tx_packets++;
		tx_stats->tx_bytes += skb->len;
		u64_stats_update_end(&tx_stats->syncp_tx);
		dev_kfree_skb(skb);
		pkts++;
	}
	return pkts;
}
*napi
, int budget
)
1061 struct netcp_intf
*netcp
= container_of(napi
, struct netcp_intf
,
1064 packets
= netcp_process_tx_compl_packets(netcp
, budget
);
1065 if (packets
< budget
) {
1066 napi_complete(&netcp
->tx_napi
);
1067 knav_queue_enable_notify(netcp
->tx_compl_q
);
1073 static void netcp_tx_notify(void *arg
)
1075 struct netcp_intf
*netcp
= arg
;
1077 knav_queue_disable_notify(netcp
->tx_compl_q
);
1078 napi_schedule(&netcp
->tx_napi
);
static struct knav_dma_desc*
netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
{
	struct knav_dma_desc *desc, *ndesc, *pdesc;
	unsigned int pkt_len = skb_headlen(skb);
	struct device *dev = netcp->dev;
	dma_addr_t dma_addr;
	unsigned int dma_sz;
	int i;

	/* Map the linear buffer */
	dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma_addr))) {
		dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
		return NULL;
	}

	desc = knav_pool_desc_get(netcp->tx_pool);
	if (IS_ERR_OR_NULL(desc)) {
		dev_err(netcp->ndev_dev, "out of TX desc\n");
		dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
		return NULL;
	}

	set_pkt_info(dma_addr, pkt_len, 0, desc);
	if (skb_is_nonlinear(skb)) {
		prefetchw(skb_shinfo(skb));
	} else {
		desc->next_desc = 0;
		goto upd_pkt_len;
	}

	pdesc = desc;

	/* Handle the case where skb is fragmented in pages */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		u32 page_offset = skb_frag_off(frag);
		u32 buf_len = skb_frag_size(frag);
		dma_addr_t desc_dma;
		u32 desc_dma_32;

		dma_addr = dma_map_page(dev, page, page_offset, buf_len,
					DMA_TO_DEVICE);
		if (unlikely(!dma_addr)) {
			dev_err(netcp->ndev_dev, "Failed to map skb page\n");
			goto free_descs;
		}

		ndesc = knav_pool_desc_get(netcp->tx_pool);
		if (IS_ERR_OR_NULL(ndesc)) {
			dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
			dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
			goto free_descs;
		}

		desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
		set_pkt_info(dma_addr, buf_len, 0, ndesc);
		desc_dma_32 = (u32)desc_dma;
		set_words(&desc_dma_32, 1, &pdesc->next_desc);
		pkt_len += buf_len;
		if (pdesc != desc)
			knav_pool_desc_map(netcp->tx_pool, pdesc,
					   sizeof(*pdesc), &desc_dma, &dma_sz);
		pdesc = ndesc;
	}
	if (pdesc != desc)
		knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
				   &dma_addr, &dma_sz);

	/* frag list based linkage is not supported for now. */
	if (skb_shinfo(skb)->frag_list) {
		dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
		goto free_descs;
	}

upd_pkt_len:
	WARN_ON(pkt_len != skb->len);

	pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
	set_words(&pkt_len, 1, &desc->desc_info);
	return desc;

free_descs:
	netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	return NULL;
}
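
/* netcp_tx_map_skb() builds a host descriptor chain: the first
 * descriptor maps the linear skb data, one extra descriptor is linked
 * per page fragment through next_desc (a 32-bit DMA address), and the
 * total packet length is written into the head descriptor last. On any
 * failure the whole chain is unwound via netcp_free_tx_desc_chain().
 */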
static int netcp_tx_submit_skb(struct netcp_intf *netcp,
			       struct sk_buff *skb,
			       struct knav_dma_desc *desc)
{
	struct netcp_tx_pipe *tx_pipe = NULL;
	struct netcp_hook_list *tx_hook;
	struct netcp_packet p_info;
	struct netcp_tx_cb *tx_cb;
	unsigned int dma_sz;
	dma_addr_t dma;
	u32 tmp = 0;
	int ret = 0;

	p_info.netcp = netcp;
	p_info.skb = skb;
	p_info.tx_pipe = NULL;
	p_info.psdata_len = 0;
	p_info.ts_context = NULL;
	p_info.txtstamp = NULL;
	p_info.epib = desc->epib;
	p_info.psdata = (u32 __force *)desc->psdata;
	memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));

	/* Find out where to inject the packet for transmission */
	list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
		ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
					&p_info);
		if (unlikely(ret != 0)) {
			dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
				tx_hook->order, ret);
			ret = (ret < 0) ? ret : NETDEV_TX_OK;
			goto out;
		}
	}

	/* Make sure some TX hook claimed the packet */
	tx_pipe = p_info.tx_pipe;
	if (!tx_pipe) {
		dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
		ret = -ENXIO;
		goto out;
	}

	tx_cb = (struct netcp_tx_cb *)skb->cb;
	tx_cb->ts_context = p_info.ts_context;
	tx_cb->txtstamp = p_info.txtstamp;

	/* update descriptor */
	if (p_info.psdata_len) {
		/* psdata points to both native-endian and device-endian data */
		__le32 *psdata = (void __force *)p_info.psdata;

		set_words((u32 *)psdata +
			  (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
			  p_info.psdata_len, psdata);
		tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
			KNAV_DMA_DESC_PSLEN_SHIFT;
	}

	tmp |= KNAV_DMA_DESC_HAS_EPIB |
		((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
		KNAV_DMA_DESC_RETQ_SHIFT);

	if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
		tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
			KNAV_DMA_DESC_PSFLAG_SHIFT);
	}

	set_words(&tmp, 1, &desc->packet_info);
	/* warning!!!! We are saving the virtual ptr in the sw_data
	 * field as a 32bit value. Will not work on 64bit machines
	 */
	SET_SW_DATA0((u32)skb, desc);

	if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
		tmp = tx_pipe->switch_to_port;
		set_words(&tmp, 1, &desc->tag_info);
	}

	/* submit packet descriptor */
	ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
				 &dma_sz);
	if (unlikely(ret)) {
		dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
		ret = -ENOMEM;
		goto out;
	}
	skb_tx_timestamp(skb);
	knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);

out:
	return ret;
}
/* Submit the packet */
static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *tx_stats = &netcp->stats;
	int subqueue = skb_get_queue_mapping(skb);
	struct knav_dma_desc *desc;
	int desc_count, ret = 0;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
		ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
		if (ret < 0) {
			/* If we get here, the skb has already been dropped */
			dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
				 ret);
			tx_stats->tx_dropped++;
			return ret;
		}
		skb->len = NETCP_MIN_PACKET_SIZE;
	}

	desc = netcp_tx_map_skb(skb, netcp);
	if (unlikely(!desc)) {
		netif_stop_subqueue(ndev, subqueue);
		ret = -ENOBUFS;
		goto drop;
	}

	ret = netcp_tx_submit_skb(netcp, skb, desc);
	if (ret)
		goto drop;

	/* Check Tx pool count & stop subqueue if needed */
	desc_count = knav_pool_count(netcp->tx_pool);
	if (desc_count < netcp->tx_pause_threshold) {
		dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
		netif_stop_subqueue(ndev, subqueue);
	}
	return NETDEV_TX_OK;

drop:
	tx_stats->tx_dropped++;
	if (desc)
		netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
	dev_kfree_skb(skb);
	return ret;
}
int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
{
	if (tx_pipe->dma_channel) {
		knav_dma_close_channel(tx_pipe->dma_channel);
		tx_pipe->dma_channel = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_close);

int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
{
	struct device *dev = tx_pipe->netcp_device->device;
	struct knav_dma_cfg config;
	int ret = 0;
	u8 name[16];

	memset(&config, 0, sizeof(config));
	config.direction = DMA_MEM_TO_DEV;
	config.u.tx.filt_einfo = false;
	config.u.tx.filt_pswords = false;
	config.u.tx.priority = DMA_PRIO_MED_L;

	tx_pipe->dma_channel = knav_dma_open_channel(dev,
				tx_pipe->dma_chan_name, &config);
	if (IS_ERR(tx_pipe->dma_channel)) {
		dev_err(dev, "failed opening tx chan(%s)\n",
			tx_pipe->dma_chan_name);
		ret = PTR_ERR(tx_pipe->dma_channel);
		goto err;
	}

	snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
	tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
					     KNAV_QUEUE_SHARED);
	if (IS_ERR(tx_pipe->dma_queue)) {
		dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
			name, ret);
		ret = PTR_ERR(tx_pipe->dma_queue);
		goto err;
	}

	dev_dbg(dev, "opened tx pipe %s\n", name);
	return 0;

err:
	if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
		knav_dma_close_channel(tx_pipe->dma_channel);
	tx_pipe->dma_channel = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_open);

int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
		      struct netcp_device *netcp_device,
		      const char *dma_chan_name, unsigned int dma_queue_id)
{
	memset(tx_pipe, 0, sizeof(*tx_pipe));
	tx_pipe->netcp_device = netcp_device;
	tx_pipe->dma_chan_name = dma_chan_name;
	tx_pipe->dma_queue_id = dma_queue_id;
	return 0;
}
EXPORT_SYMBOL_GPL(netcp_txpipe_init);
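
/* Tx pipe lifecycle for module authors: netcp_txpipe_init() fills in
 * the channel name and queue id, netcp_txpipe_open() opens the DMA
 * channel and its transmit queue, and netcp_txpipe_close() releases
 * the channel again. A module's tx hook points p_info.tx_pipe at an
 * opened pipe to claim a packet for transmission.
 */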
static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
					  const u8 *addr,
					  enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node) {
		if (naddr->type != type)
			continue;
		if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
			continue;
		return naddr;
	}

	return NULL;
}

static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
					 const u8 *addr,
					 enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
	if (!naddr)
		return NULL;

	naddr->type = type;
	naddr->flags = 0;
	naddr->netcp = netcp;
	if (addr)
		ether_addr_copy(naddr->addr, addr);
	else
		eth_zero_addr(naddr->addr);
	list_add_tail(&naddr->node, &netcp->addr_list);

	return naddr;
}

static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
{
	list_del(&naddr->node);
	devm_kfree(netcp->dev, naddr);
}

static void netcp_addr_clear_mark(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr;

	list_for_each_entry(naddr, &netcp->addr_list, node)
		naddr->flags = 0;
}

static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
				enum netcp_addr_type type)
{
	struct netcp_addr *naddr;

	naddr = netcp_addr_find(netcp, addr, type);
	if (naddr) {
		naddr->flags |= ADDR_VALID;
		return;
	}

	naddr = netcp_addr_add(netcp, addr, type);
	if (!WARN_ON(!naddr))
		naddr->flags |= ADDR_NEW;
}
static void netcp_addr_sweep_del(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (naddr->flags & (ADDR_VALID | ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
			naddr->addr, naddr->type);
		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->del_addr)
				continue;
			error = module->del_addr(priv->module_priv,
						 naddr);
			WARN_ON(error);
		}
		netcp_addr_del(netcp, naddr);
	}
}

static void netcp_addr_sweep_add(struct netcp_intf *netcp)
{
	struct netcp_addr *naddr, *tmp;
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
		if (!(naddr->flags & ADDR_NEW))
			continue;
		dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
			naddr->addr, naddr->type);

		for_each_module(netcp, priv) {
			module = priv->netcp_module;
			if (!module->add_addr)
				continue;
			error = module->add_addr(priv->module_priv, naddr);
			WARN_ON(error);
		}
	}
}

static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
{
	struct netcp_intf_modpriv *priv;
	struct netcp_module *module;
	int error;

	for_each_module(netcp, priv) {
		module = priv->netcp_module;
		if (!module->set_rx_mode)
			continue;

		error = module->set_rx_mode(priv->module_priv, promisc);
		if (error)
			return error;
	}
	return 0;
}
static void netcp_set_rx_mode(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netdev_hw_addr *ndev_addr;
	bool promisc;

	promisc = (ndev->flags & IFF_PROMISC ||
		   ndev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);

	spin_lock(&netcp->lock);
	/* first clear all marks */
	netcp_addr_clear_mark(netcp);

	/* next add new entries, mark existing ones */
	netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
	for_each_dev_addr(ndev, ndev_addr)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
	netdev_for_each_uc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
	netdev_for_each_mc_addr(ndev_addr, ndev)
		netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);

	if (promisc)
		netcp_addr_add_mark(netcp, NULL, ADDR_ANY);

	/* finally sweep and callout into modules */
	netcp_addr_sweep_del(netcp);
	netcp_addr_sweep_add(netcp);
	netcp_set_promiscuous(netcp, promisc);
	spin_unlock(&netcp->lock);
}
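
/* Address filtering uses a mark-and-sweep scheme: all flags are
 * cleared first, every address that should survive is re-marked
 * (ADDR_VALID for existing entries, ADDR_NEW for new ones), and the
 * sweep then deletes unmarked entries from the modules and adds the
 * new ones. This reduces the netdev address lists to a minimal set of
 * add/del callouts into the modules.
 */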
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
{
	int i;

	if (netcp->rx_channel) {
		knav_dma_close_channel(netcp->rx_channel);
		netcp->rx_channel = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->rx_pool))
		netcp_rxpool_free(netcp);

	if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
		knav_queue_close(netcp->rx_queue);
		netcp->rx_queue = NULL;
	}

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
	     !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
		knav_queue_close(netcp->rx_fdq[i]);
		netcp->rx_fdq[i] = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
		knav_queue_close(netcp->tx_compl_q);
		netcp->tx_compl_q = NULL;
	}

	if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
		knav_pool_destroy(netcp->tx_pool);
		netcp->tx_pool = NULL;
	}
}

static int netcp_setup_navigator_resources(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct knav_queue_notify_config notify_cfg;
	struct knav_dma_cfg config;
	u32 last_fdq = 0;
	u8 name[16];
	int ret;
	int i;

	/* Create Rx/Tx descriptor pools */
	snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
	netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
					  netcp->rx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->rx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
		ret = PTR_ERR(netcp->rx_pool);
		goto fail;
	}

	snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
	netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
					  netcp->tx_pool_region_id);
	if (IS_ERR_OR_NULL(netcp->tx_pool)) {
		dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
		ret = PTR_ERR(netcp->tx_pool);
		goto fail;
	}

	/* open Tx completion queue */
	snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
	netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
	if (IS_ERR(netcp->tx_compl_q)) {
		ret = PTR_ERR(netcp->tx_compl_q);
		goto fail;
	}
	netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);

	/* Set notification for Tx completion */
	notify_cfg.fn = netcp_tx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->tx_compl_q,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->tx_compl_q);

	/* open Rx completion queue */
	snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
	netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
	if (IS_ERR(netcp->rx_queue)) {
		ret = PTR_ERR(netcp->rx_queue);
		goto fail;
	}
	netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);

	/* Set notification for Rx completion */
	notify_cfg.fn = netcp_rx_notify;
	notify_cfg.fn_arg = netcp;
	ret = knav_queue_device_control(netcp->rx_queue,
					KNAV_QUEUE_SET_NOTIFIER,
					(unsigned long)&notify_cfg);
	if (ret)
		goto fail;

	knav_queue_disable_notify(netcp->rx_queue);

	/* open Rx FDQs */
	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
	     ++i) {
		snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
		netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
		if (IS_ERR(netcp->rx_fdq[i])) {
			ret = PTR_ERR(netcp->rx_fdq[i]);
			goto fail;
		}
	}

	memset(&config, 0, sizeof(config));
	config.direction		= DMA_DEV_TO_MEM;
	config.u.rx.einfo_present	= true;
	config.u.rx.psinfo_present	= true;
	config.u.rx.err_mode		= DMA_DROP;
	config.u.rx.desc_type		= DMA_DESC_HOST;
	config.u.rx.psinfo_at_sop	= false;
	config.u.rx.sop_offset		= NETCP_SOP_OFFSET;
	config.u.rx.dst_q		= netcp->rx_queue_id;
	config.u.rx.thresh		= DMA_THRESH_NONE;

	for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
		if (netcp->rx_fdq[i])
			last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
		config.u.rx.fdq[i] = last_fdq;
	}

	netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
					netcp->dma_chan_name, &config);
	if (IS_ERR(netcp->rx_channel)) {
		dev_err(netcp->ndev_dev, "failed opening rx chan(%s)\n",
			netcp->dma_chan_name);
		ret = PTR_ERR(netcp->rx_channel);
		goto fail;
	}

	dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
	return 0;

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}
/* Open the device */
static int netcp_ndo_open(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret;

	netif_carrier_off(ndev);
	ret = netcp_setup_navigator_resources(ndev);
	if (ret) {
		dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
		goto fail;
	}

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->open) {
			ret = module->open(intf_modpriv->module_priv, ndev);
			if (ret != 0) {
				dev_err(netcp->ndev_dev, "module open failed\n");
				goto fail_open;
			}
		}
	}

	napi_enable(&netcp->rx_napi);
	napi_enable(&netcp->tx_napi);
	knav_queue_enable_notify(netcp->tx_compl_q);
	knav_queue_enable_notify(netcp->rx_queue);
	netcp_rxpool_refill(netcp);
	netif_tx_wake_all_queues(ndev);
	dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
	return 0;

fail_open:
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close)
			module->close(intf_modpriv->module_priv, ndev);
	}

fail:
	netcp_free_navigator_resources(netcp);
	return ret;
}

/* Close the device */
static int netcp_ndo_stop(struct net_device *ndev)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int err = 0;

	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netcp_addr_clear_mark(netcp);
	netcp_addr_sweep_del(netcp);
	knav_queue_disable_notify(netcp->rx_queue);
	knav_queue_disable_notify(netcp->tx_compl_q);
	napi_disable(&netcp->rx_napi);
	napi_disable(&netcp->tx_napi);

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->close) {
			err = module->close(intf_modpriv->module_priv, ndev);
			if (err != 0)
				dev_err(netcp->ndev_dev, "Close failed\n");
		}
	}

	/* Recycle Rx descriptors from completion queue */
	netcp_empty_rx_queue(netcp);

	/* Recycle Tx descriptors from completion queue */
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);

	if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
		dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
			netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));

	netcp_free_navigator_resources(netcp);
	dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
	return 0;
}
static int netcp_ndo_ioctl(struct net_device *ndev,
			   struct ifreq *req, int cmd)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	int ret = -1, err = -EOPNOTSUPP;

	if (!netif_running(ndev))
		return -EINVAL;

	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (!module->ioctl)
			continue;

		err = module->ioctl(intf_modpriv->module_priv, req, cmd);
		if ((err < 0) && (err != -EOPNOTSUPP)) {
			ret = err;
			goto out;
		}
		if (err == 0)
			ret = err;
	}

out:
	return (ret == 0) ? 0 : err;
}

static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	unsigned int descs = knav_pool_count(netcp->tx_pool);

	dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
	netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if ((module->add_vid) && (vid != 0)) {
			err = module->add_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);

	return err;
}

static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_intf_modpriv *intf_modpriv;
	struct netcp_module *module;
	unsigned long flags;
	int err = 0;

	dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);

	spin_lock_irqsave(&netcp->lock, flags);
	for_each_module(netcp, intf_modpriv) {
		module = intf_modpriv->netcp_module;
		if (module->del_vid) {
			err = module->del_vid(intf_modpriv->module_priv, vid);
			if (err != 0) {
				dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
					vid);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&netcp->lock, flags);
	return err;
}

static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc;
	int i;

	/* setup tc must be called under rtnl lock */
	ASSERT_RTNL();

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	/* Sanity-check the number of traffic classes requested */
	if ((dev->real_num_tx_queues <= 1) ||
	    (dev->real_num_tx_queues < num_tc))
		return -EINVAL;

	/* Configure traffic class to queue mappings */
	if (num_tc) {
		netdev_set_num_tc(dev, num_tc);
		for (i = 0; i < num_tc; i++)
			netdev_set_tc_queue(dev, i, 1, i);
	} else {
		netdev_reset_tc(dev);
	}

	return 0;
}
static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
	struct netcp_intf *netcp = netdev_priv(ndev);
	struct netcp_stats *p = &netcp->stats;
	u64 rxpackets, rxbytes, txpackets, txbytes;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_rx);
		rxpackets       = p->rx_packets;
		rxbytes         = p->rx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));

	do {
		start = u64_stats_fetch_begin_irq(&p->syncp_tx);
		txpackets       = p->tx_packets;
		txbytes         = p->tx_bytes;
	} while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));

	stats->rx_packets = rxpackets;
	stats->rx_bytes = rxbytes;
	stats->tx_packets = txpackets;
	stats->tx_bytes = txbytes;

	/* The following are stored as 32 bit */
	stats->rx_errors = p->rx_errors;
	stats->rx_dropped = p->rx_dropped;
	stats->tx_dropped = p->tx_dropped;
}
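
/* Packet and byte counters are u64 and protected by u64_stats
 * seqcounts so that 32-bit readers see consistent values without
 * taking a lock in the hot path; the error and drop counters are
 * plain 32-bit fields and are copied out directly.
 */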
static const struct net_device_ops netcp_netdev_ops = {
	.ndo_open		= netcp_ndo_open,
	.ndo_stop		= netcp_ndo_stop,
	.ndo_start_xmit		= netcp_ndo_start_xmit,
	.ndo_set_rx_mode	= netcp_set_rx_mode,
	.ndo_do_ioctl		= netcp_ndo_ioctl,
	.ndo_get_stats64	= netcp_get_stats,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= netcp_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= netcp_rx_kill_vid,
	.ndo_tx_timeout		= netcp_ndo_tx_timeout,
	.ndo_select_queue	= dev_pick_tx_zero,
	.ndo_setup_tc		= netcp_setup_tc,
};
static int netcp_create_interface(struct netcp_device *netcp_device,
				  struct device_node *node_interface)
{
	struct device *dev = netcp_device->device;
	struct device_node *node = dev->of_node;
	struct netcp_intf *netcp;
	struct net_device *ndev;
	resource_size_t size;
	struct resource res;
	void __iomem *efuse = NULL;
	u32 efuse_mac = 0;
	const void *mac_addr;
	u8 efuse_mac_addr[6];
	u32 temp[2];
	int ret = 0;

	ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
	if (!ndev) {
		dev_err(dev, "Error allocating netdev\n");
		return -ENOMEM;
	}

	ndev->features |= NETIF_F_SG;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	ndev->hw_features = ndev->features;
	ndev->vlan_features |= NETIF_F_SG;

	/* MTU range: 68 - 9486 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);

	netcp = netdev_priv(ndev);
	spin_lock_init(&netcp->lock);
	INIT_LIST_HEAD(&netcp->module_head);
	INIT_LIST_HEAD(&netcp->txhook_list_head);
	INIT_LIST_HEAD(&netcp->rxhook_list_head);
	INIT_LIST_HEAD(&netcp->addr_list);
	u64_stats_init(&netcp->stats.syncp_rx);
	u64_stats_init(&netcp->stats.syncp_tx);
	netcp->netcp_device = netcp_device;
	netcp->dev = netcp_device->device;
	netcp->ndev = ndev;
	netcp->ndev_dev  = &ndev->dev;
	netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
	netcp->tx_pause_threshold = MAX_SKB_FRAGS;
	netcp->tx_resume_threshold = netcp->tx_pause_threshold;
	netcp->node_interface = node_interface;

	ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
	if (efuse_mac) {
		if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
			dev_err(dev, "could not find efuse-mac reg resource\n");
			ret = -ENODEV;
			goto quit;
		}
		size = resource_size(&res);

		if (!devm_request_mem_region(dev, res.start, size,
					     dev_name(dev))) {
			dev_err(dev, "could not reserve resource\n");
			ret = -ENOMEM;
			goto quit;
		}

		efuse = devm_ioremap(dev, res.start, size);
		if (!efuse) {
			dev_err(dev, "could not map resource\n");
			devm_release_mem_region(dev, res.start, size);
			ret = -ENOMEM;
			goto quit;
		}

		emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
		if (is_valid_ether_addr(efuse_mac_addr))
			ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
		else
			eth_random_addr(ndev->dev_addr);

		devm_iounmap(dev, efuse);
		devm_release_mem_region(dev, res.start, size);
	} else {
		mac_addr = of_get_mac_address(node_interface);
		if (!IS_ERR(mac_addr))
			ether_addr_copy(ndev->dev_addr, mac_addr);
		else
			eth_random_addr(ndev->dev_addr);
	}

	ret = of_property_read_string(node_interface, "rx-channel",
				      &netcp->dma_chan_name);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-channel\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "rx-queue",
				   &netcp->rx_queue_id);
	if (ret < 0) {
		dev_warn(dev, "missing \"rx-queue\" parameter\n");
		netcp->rx_queue_id = KNAV_QUEUE_QPEND;
	}

	ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
					 netcp->rx_queue_depths,
					 KNAV_DMA_FDQ_PER_CHAN);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
		netcp->rx_queue_depths[0] = 128;
	}

	ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"rx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->rx_pool_size = temp[0];
	netcp->rx_pool_region_id = temp[1];

	ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
	if (ret < 0) {
		dev_err(dev, "missing \"tx-pool\" parameter\n");
		ret = -ENODEV;
		goto quit;
	}
	netcp->tx_pool_size = temp[0];
	netcp->tx_pool_region_id = temp[1];

	if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
		dev_err(dev, "tx-pool size too small, must be at least(%ld)\n",
			MAX_SKB_FRAGS);
		ret = -ENODEV;
		goto quit;
	}

	ret = of_property_read_u32(node_interface, "tx-completion-queue",
				   &netcp->tx_compl_qid);
	if (ret < 0) {
		dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
		netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
	}

	/* NAPI register */
	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

	/* Register the network device */
	ndev->dev_id		= 0;
	ndev->watchdog_timeo	= NETCP_TX_TIMEOUT;
	ndev->netdev_ops	= &netcp_netdev_ops;
	SET_NETDEV_DEV(ndev, dev);

	list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
	return 0;

quit:
	free_netdev(ndev);
	return ret;
}
*netcp_device
,
2120 struct net_device
*ndev
)
2122 struct netcp_intf_modpriv
*intf_modpriv
, *tmp
;
2123 struct netcp_intf
*netcp
= netdev_priv(ndev
);
2124 struct netcp_module
*module
;
2126 dev_dbg(netcp_device
->device
, "Removing interface \"%s\"\n",
2129 /* Notify each of the modules that the interface is going away */
2130 list_for_each_entry_safe(intf_modpriv
, tmp
, &netcp
->module_head
,
2132 module
= intf_modpriv
->netcp_module
;
2133 dev_dbg(netcp_device
->device
, "Releasing module \"%s\"\n",
2135 if (module
->release
)
2136 module
->release(intf_modpriv
->module_priv
);
2137 list_del(&intf_modpriv
->intf_list
);
2139 WARN(!list_empty(&netcp
->module_head
), "%s interface module list is not empty!\n",
2142 list_del(&netcp
->interface_list
);
2144 of_node_put(netcp
->node_interface
);
2145 unregister_netdev(ndev
);
static int netcp_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct device_node *child, *interfaces;
	struct netcp_device *netcp_device;
	struct device *dev = &pdev->dev;
	struct netcp_module *module;
	int ret;

	if (!knav_dma_device_ready() ||
	    !knav_qmss_device_ready())
		return -EPROBE_DEFER;

	if (!node) {
		dev_err(dev, "could not find device info\n");
		return -ENODEV;
	}

	/* Allocate a new NETCP device instance */
	netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
	if (!netcp_device)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable NETCP power-domain\n");
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Initialize the NETCP device instance */
	INIT_LIST_HEAD(&netcp_device->interface_head);
	INIT_LIST_HEAD(&netcp_device->modpriv_head);
	netcp_device->device = dev;
	platform_set_drvdata(pdev, netcp_device);

	/* create interfaces */
	interfaces = of_get_child_by_name(node, "netcp-interfaces");
	if (!interfaces) {
		dev_err(dev, "could not find netcp-interfaces node\n");
		ret = -ENODEV;
		goto probe_quit;
	}

	for_each_available_child_of_node(interfaces, child) {
		ret = netcp_create_interface(netcp_device, child);
		if (ret) {
			dev_err(dev, "could not create interface(%pOFn)\n",
				child);
			goto probe_quit_interface;
		}
	}

	of_node_put(interfaces);

	/* Add the device instance to the list */
	list_add_tail(&netcp_device->device_list, &netcp_devices);

	/* Probe & attach any modules already registered */
	mutex_lock(&netcp_modules_lock);
	for_each_netcp_module(module) {
		ret = netcp_module_probe(netcp_device, module);
		if (ret < 0)
			dev_err(dev, "module(%s) probe failed\n", module->name);
	}
	mutex_unlock(&netcp_modules_lock);
	return 0;

probe_quit_interface:
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	of_node_put(interfaces);

probe_quit:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}
static int netcp_remove(struct platform_device *pdev)
{
	struct netcp_device *netcp_device = platform_get_drvdata(pdev);
	struct netcp_intf *netcp_intf, *netcp_tmp;
	struct netcp_inst_modpriv *inst_modpriv, *tmp;
	struct netcp_module *module;

	list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
				 inst_list) {
		module = inst_modpriv->netcp_module;
		dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
		module->remove(netcp_device, inst_modpriv->module_priv);
		list_del(&inst_modpriv->inst_list);
	}

	/* now that all modules are removed, clean up the interfaces */
	list_for_each_entry_safe(netcp_intf, netcp_tmp,
				 &netcp_device->interface_head,
				 interface_list) {
		netcp_delete_interface(netcp_device, netcp_intf->ndev);
	}

	WARN(!list_empty(&netcp_device->interface_head),
	     "%s interface list not empty!\n", pdev->name);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,netcp-1.0", },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver netcp_driver = {
	.driver = {
		.name		= "netcp-1.0",
		.of_match_table	= of_match,
	},
	.probe = netcp_probe,
	.remove = netcp_remove,
};
module_platform_driver(netcp_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");