/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");
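
/*
 * These are ordinary module parameters: when the driver is built as a
 * module they can be overridden at load time, for example with
 * "modprobe octeon-ethernet num_packet_buffers=2048" (the module name
 * depends on how the driver is built), or on the kernel command line
 * when the driver is built in.
 */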

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}
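
/*
 * The refill worker above and the periodic worker below both use the
 * usual self-rearming delayed-work pattern: each run re-queues itself
 * HZ jiffies (one second) later unless cvm_oct_poll_queue_stopping has
 * been set. cvm_oct_remove() sets that flag before calling
 * cancel_delayed_work_sync(), so teardown cannot race a re-queue.
 */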

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
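
/*
 * With the default num_packet_buffers of 1024, the setup_red() call
 * above arms RED (Random Early Discard) with thresholds of
 * 1024 / 4 = 256 and 1024 / 8 = 128 free buffers; the precise
 * semantics of the two values are defined by cvmx_helper_setup_red()
 * in the CVMX helper library.
 */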

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
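
	/*
	 * Per the Octeon IPD buffer-chaining convention, each receive
	 * buffer in a chained packet carries the hardware pointer to
	 * the next buffer in the 8 bytes just before its data address,
	 * which is why the loop below reads next_ptr from
	 * segment_ptr.s.addr - 8 before freeing the current segment.
	 */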
	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
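
/*
 * The casts above rely on rx_dropped (an unsigned long in struct
 * net_device_stats) having the same size and alignment as atomic64_t
 * on 64-bit kernels and atomic_t on 32-bit ones, so the RX path and
 * this function can both add to it without taking a lock.
 */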

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65535 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
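
/*
 * Example of the packing above: for a dev_addr of 00:01:02:03:04:05
 * the loop builds mac = 0x000102030405, which is written to the GMX
 * SMAC register (used as the source address of hardware-generated
 * pause frames), while CAM0..CAM5 each receive one byte of the
 * address for receive-side filtering.
 */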

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (!cvm_oct_poll_queue) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				queue_delayed_work(cvm_oct_poll_queue,
						&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
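
/*
 * For scale: octeon_get_clock_rate() returns the core clock in Hz, so
 * at an 800 MHz core clock the expression above yields 150 * 800 =
 * 120000 clock cycles, i.e. the TX cleanup poll fires roughly every
 * 150 microseconds, long enough for about ten 1500-byte frames at
 * gigabit rates.
 */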

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);
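
/*
 * A device-tree node along these lines binds this driver (illustrative
 * only; the unit address and surrounding layout vary by board):
 *
 *	pip: pip@11800a0000000 {
 *		compatible = "cavium,octeon-3860-pip";
 *		...
 *	};
 *
 * cvm_oct_probe() also walks the node's per-interface and per-port
 * children by their "reg" values (see cvm_oct_node_for_port above),
 * and its error message refers to the conventional "pip" alias.
 */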

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");