/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/dst.h>

#include <asm/octeon/octeon.h>
#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
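
/*
 * Largest value of new_mtu plus L2 overhead accepted by
 * cvm_oct_common_change_mtu() below; it keeps the sizes programmed
 * into the hardware frame-length and jabber checks in range.
 */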
#define OCTEON_MAX_MTU 65392
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");
static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");
static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");
int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif
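
	/*
	 * Enable RED (random early discard) so input starts dropping
	 * packets early as free buffers run low, using a quarter of the
	 * packet buffers as the pass threshold and an eighth as the
	 * drop threshold.
	 */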
	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		/* The next buffer pointer is stored 8 bytes before the
		 * start of the current buffer's data.
		 */
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;
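
	/*
	 * With VLAN support enabled the overhead works out to
	 * ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22 bytes;
	 * without it, 18 bytes.
	 */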
	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65535 bytes.
	 */
	if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) ||
	    (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) {
		pr_err("MTU must be between %d and %d.\n",
		       VLAN_ETH_ZLEN - mtu_overhead,
		       OCTEON_MAX_MTU - mtu_overhead);
		return -EINVAL;
	}
	dev->mtu = new_mtu;
	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
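		/* e.g. a 1522-byte limit rounds to (1522 + 7) & ~7 = 1528 */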
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];
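
		/*
		 * mac now holds the six address octets big-endian in its
		 * low 48 bits, e.g. 00:01:02:03:04:05 becomes
		 * 0x000102030405.
		 */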
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}
void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}
static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}
static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
	u32 delay_value;

	if (!of_property_read_u32(np, "rx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
	if (!of_property_read_u32(np, "tx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();
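
	/*
	 * Compute the mask of WQE receive groups to use. For example,
	 * receive_group_order = 2 takes 1 << 2 = 4 groups into use, so
	 * the mask below becomes (1 << 4) - 1 = 0xf (groups 0-3);
	 * receive_group_order = 0 falls back to the single group selected
	 * by pow_receive_group.
	 */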
	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}
	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}
	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}
			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
				if (of_phy_register_fixed_link(priv->of_node)) {
					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
						   interface, priv->port);
					dev->netdev_ops = NULL;
				}
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}
	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
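	/*
	 * octeon_get_clock_rate() / 1000000 is the core clock in cycles
	 * per microsecond, so the interval above is 150 us expressed in
	 * core-clock cycles.
	 */
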
	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);
static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");