2 * This file is based on code from OCTEON SDK by Cavium Networks.
4 * Copyright (c) 2003-2007 Cavium Networks
6 * This file is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License, Version 2, as
8 * published by the Free Software Foundation.
11 #include <linux/platform_device.h>
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/phy.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/of_net.h>
20 #include <linux/if_ether.h>
21 #include <linux/if_vlan.h>
25 #include <asm/octeon/octeon.h>
27 #include "ethernet-defines.h"
28 #include "octeon-ethernet.h"
29 #include "ethernet-mem.h"
30 #include "ethernet-rx.h"
31 #include "ethernet-tx.h"
32 #include "ethernet-mdio.h"
33 #include "ethernet-util.h"
35 #include <asm/octeon/cvmx-pip.h>
36 #include <asm/octeon/cvmx-pko.h>
37 #include <asm/octeon/cvmx-fau.h>
38 #include <asm/octeon/cvmx-ipd.h>
39 #include <asm/octeon/cvmx-helper.h>
40 #include <asm/octeon/cvmx-asxx-defs.h>
41 #include <asm/octeon/cvmx-gmxx-defs.h>
42 #include <asm/octeon/cvmx-smix-defs.h>
44 #define OCTEON_MAX_MTU 65392
46 static int num_packet_buffers
= 1024;
47 module_param(num_packet_buffers
, int, 0444);
48 MODULE_PARM_DESC(num_packet_buffers
, "\n"
49 "\tNumber of packet buffers to allocate and store in the\n"
50 "\tFPA. By default, 1024 packet buffers are used.\n");
52 static int pow_receive_group
= 15;
53 module_param(pow_receive_group
, int, 0444);
54 MODULE_PARM_DESC(pow_receive_group
, "\n"
55 "\tPOW group to receive packets from. All ethernet hardware\n"
56 "\twill be configured to send incoming packets to this POW\n"
57 "\tgroup. Also any other software can submit packets to this\n"
58 "\tgroup for the kernel to process.");
60 static int receive_group_order
;
61 module_param(receive_group_order
, int, 0444);
62 MODULE_PARM_DESC(receive_group_order
, "\n"
63 "\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
64 "\twill be configured to send incoming packets to multiple POW\n"
65 "\tgroups. pow_receive_group parameter is ignored when multiple\n"
66 "\tgroups are taken into use and groups are allocated starting\n"
67 "\tfrom 0. By default, a single group is used.\n");
69 int pow_send_group
= -1;
70 module_param(pow_send_group
, int, 0644);
71 MODULE_PARM_DESC(pow_send_group
, "\n"
72 "\tPOW group to send packets to other software on. This\n"
73 "\tcontrols the creation of the virtual device pow0.\n"
74 "\talways_use_pow also depends on this value.");
77 module_param(always_use_pow
, int, 0444);
78 MODULE_PARM_DESC(always_use_pow
, "\n"
79 "\tWhen set, always send to the pow group. This will cause\n"
80 "\tpackets sent to real ethernet devices to be sent to the\n"
81 "\tPOW group instead of the hardware. Unless some other\n"
82 "\tapplication changes the config, packets will still be\n"
83 "\treceived from the low level hardware. Use this option\n"
84 "\tto allow a CVMX app to intercept all packets from the\n"
85 "\tlinux kernel. You must specify pow_send_group along with\n"
88 char pow_send_list
[128] = "";
89 module_param_string(pow_send_list
, pow_send_list
, sizeof(pow_send_list
), 0444);
90 MODULE_PARM_DESC(pow_send_list
, "\n"
91 "\tComma separated list of ethernet devices that should use the\n"
92 "\tPOW for transmit instead of the actual ethernet hardware. This\n"
93 "\tis a per port version of always_use_pow. always_use_pow takes\n"
94 "\tprecedence over this list. For example, setting this to\n"
95 "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
96 "\tusing the pow_send_group.");
98 int rx_napi_weight
= 32;
99 module_param(rx_napi_weight
, int, 0444);
100 MODULE_PARM_DESC(rx_napi_weight
, "The NAPI WEIGHT parameter.");
102 /* Mask indicating which receive groups are in use. */
103 int pow_receive_groups
;
106 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
108 * Set to one right before cvm_oct_poll_queue is destroyed.
110 atomic_t cvm_oct_poll_queue_stopping
= ATOMIC_INIT(0);
113 * Array of every ethernet device owned by this driver indexed by
114 * the ipd input port number.
116 struct net_device
*cvm_oct_device
[TOTAL_NUMBER_OF_PORTS
];
118 u64 cvm_oct_tx_poll_interval
;
120 static void cvm_oct_rx_refill_worker(struct work_struct
*work
);
121 static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work
, cvm_oct_rx_refill_worker
);
123 static void cvm_oct_rx_refill_worker(struct work_struct
*work
)
126 * FPA 0 may have been drained, try to refill it if we need
127 * more than num_packet_buffers / 2, otherwise normal receive
128 * processing will refill it. If it were drained, no packets
129 * could be received so cvm_oct_napi_poll would never be
130 * invoked to do the refill.
132 cvm_oct_rx_refill_pool(num_packet_buffers
/ 2);
134 if (!atomic_read(&cvm_oct_poll_queue_stopping
))
135 schedule_delayed_work(&cvm_oct_rx_refill_work
, HZ
);
138 static void cvm_oct_periodic_worker(struct work_struct
*work
)
140 struct octeon_ethernet
*priv
= container_of(work
,
141 struct octeon_ethernet
,
142 port_periodic_work
.work
);
145 priv
->poll(cvm_oct_device
[priv
->port
]);
147 cvm_oct_device
[priv
->port
]->netdev_ops
->ndo_get_stats(
148 cvm_oct_device
[priv
->port
]);
150 if (!atomic_read(&cvm_oct_poll_queue_stopping
))
151 schedule_delayed_work(&priv
->port_periodic_work
, HZ
);
154 static void cvm_oct_configure_common_hw(void)
158 cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL
, CVMX_FPA_PACKET_POOL_SIZE
,
160 cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL
, CVMX_FPA_WQE_POOL_SIZE
,
162 if (CVMX_FPA_OUTPUT_BUFFER_POOL
!= CVMX_FPA_PACKET_POOL
)
163 cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL
,
164 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
, 1024);
166 #ifdef __LITTLE_ENDIAN
168 union cvmx_ipd_ctl_status ipd_ctl_status
;
170 ipd_ctl_status
.u64
= cvmx_read_csr(CVMX_IPD_CTL_STATUS
);
171 ipd_ctl_status
.s
.pkt_lend
= 1;
172 ipd_ctl_status
.s
.wqe_lend
= 1;
173 cvmx_write_csr(CVMX_IPD_CTL_STATUS
, ipd_ctl_status
.u64
);
177 cvmx_helper_setup_red(num_packet_buffers
/ 4, num_packet_buffers
/ 8);
181 * cvm_oct_free_work- Free a work queue entry
183 * @work_queue_entry: Work queue entry to free
185 * Returns Zero on success, Negative on failure.
187 int cvm_oct_free_work(void *work_queue_entry
)
189 cvmx_wqe_t
*work
= work_queue_entry
;
191 int segments
= work
->word2
.s
.bufs
;
192 union cvmx_buf_ptr segment_ptr
= work
->packet_ptr
;
195 union cvmx_buf_ptr next_ptr
= *(union cvmx_buf_ptr
*)
196 cvmx_phys_to_ptr(segment_ptr
.s
.addr
- 8);
197 if (unlikely(!segment_ptr
.s
.i
))
198 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr
),
200 CVMX_FPA_PACKET_POOL_SIZE
/ 128);
201 segment_ptr
= next_ptr
;
203 cvmx_fpa_free(work
, CVMX_FPA_WQE_POOL
, 1);
207 EXPORT_SYMBOL(cvm_oct_free_work
);
210 * cvm_oct_common_get_stats - get the low level ethernet statistics
211 * @dev: Device to get the statistics from
213 * Returns Pointer to the statistics
215 static struct net_device_stats
*cvm_oct_common_get_stats(struct net_device
*dev
)
217 cvmx_pip_port_status_t rx_status
;
218 cvmx_pko_port_status_t tx_status
;
219 struct octeon_ethernet
*priv
= netdev_priv(dev
);
221 if (priv
->port
< CVMX_PIP_NUM_INPUT_PORTS
) {
222 if (octeon_is_simulation()) {
223 /* The simulator doesn't support statistics */
224 memset(&rx_status
, 0, sizeof(rx_status
));
225 memset(&tx_status
, 0, sizeof(tx_status
));
227 cvmx_pip_get_port_status(priv
->port
, 1, &rx_status
);
228 cvmx_pko_get_port_status(priv
->port
, 1, &tx_status
);
231 dev
->stats
.rx_packets
+= rx_status
.inb_packets
;
232 dev
->stats
.tx_packets
+= tx_status
.packets
;
233 dev
->stats
.rx_bytes
+= rx_status
.inb_octets
;
234 dev
->stats
.tx_bytes
+= tx_status
.octets
;
235 dev
->stats
.multicast
+= rx_status
.multicast_packets
;
236 dev
->stats
.rx_crc_errors
+= rx_status
.inb_errors
;
237 dev
->stats
.rx_frame_errors
+= rx_status
.fcs_align_err_packets
;
238 dev
->stats
.rx_dropped
+= rx_status
.dropped_packets
;
245 * cvm_oct_common_change_mtu - change the link MTU
246 * @dev: Device to change
247 * @new_mtu: The new MTU
249 * Returns Zero on success
251 static int cvm_oct_common_change_mtu(struct net_device
*dev
, int new_mtu
)
253 struct octeon_ethernet
*priv
= netdev_priv(dev
);
254 int interface
= INTERFACE(priv
->port
);
255 #if IS_ENABLED(CONFIG_VLAN_8021Q)
256 int vlan_bytes
= VLAN_HLEN
;
260 int mtu_overhead
= ETH_HLEN
+ ETH_FCS_LEN
+ vlan_bytes
;
264 if ((interface
< 2) &&
265 (cvmx_helper_interface_get_mode(interface
) !=
266 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
267 int index
= INDEX(priv
->port
);
268 /* Add ethernet header and FCS, and VLAN if configured. */
269 int max_packet
= new_mtu
+ mtu_overhead
;
271 if (OCTEON_IS_MODEL(OCTEON_CN3XXX
) ||
272 OCTEON_IS_MODEL(OCTEON_CN58XX
)) {
273 /* Signal errors on packets larger than the MTU */
274 cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index
, interface
),
278 * Set the hardware to truncate packets larger
279 * than the MTU and smaller the 64 bytes.
281 union cvmx_pip_frm_len_chkx frm_len_chk
;
284 frm_len_chk
.s
.minlen
= VLAN_ETH_ZLEN
;
285 frm_len_chk
.s
.maxlen
= max_packet
;
286 cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface
),
290 * Set the hardware to truncate packets larger than
291 * the MTU. The jabber register must be set to a
292 * multiple of 8 bytes, so round up.
294 cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index
, interface
),
295 (max_packet
+ 7) & ~7u);
301 * cvm_oct_common_set_multicast_list - set the multicast list
302 * @dev: Device to work on
304 static void cvm_oct_common_set_multicast_list(struct net_device
*dev
)
306 union cvmx_gmxx_prtx_cfg gmx_cfg
;
307 struct octeon_ethernet
*priv
= netdev_priv(dev
);
308 int interface
= INTERFACE(priv
->port
);
310 if ((interface
< 2) &&
311 (cvmx_helper_interface_get_mode(interface
) !=
312 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
313 union cvmx_gmxx_rxx_adr_ctl control
;
314 int index
= INDEX(priv
->port
);
317 control
.s
.bcst
= 1; /* Allow broadcast MAC addresses */
319 if (!netdev_mc_empty(dev
) || (dev
->flags
& IFF_ALLMULTI
) ||
320 (dev
->flags
& IFF_PROMISC
))
321 /* Force accept multicast packets */
324 /* Force reject multicast packets */
327 if (dev
->flags
& IFF_PROMISC
)
329 * Reject matches if promisc. Since CAM is
330 * shut off, should accept everything.
332 control
.s
.cam_mode
= 0;
334 /* Filter packets based on the CAM */
335 control
.s
.cam_mode
= 1;
338 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
339 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
340 gmx_cfg
.u64
& ~1ull);
342 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index
, interface
),
344 if (dev
->flags
& IFF_PROMISC
)
345 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
346 (index
, interface
), 0);
348 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
349 (index
, interface
), 1);
351 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
356 static int cvm_oct_set_mac_filter(struct net_device
*dev
)
358 struct octeon_ethernet
*priv
= netdev_priv(dev
);
359 union cvmx_gmxx_prtx_cfg gmx_cfg
;
360 int interface
= INTERFACE(priv
->port
);
362 if ((interface
< 2) &&
363 (cvmx_helper_interface_get_mode(interface
) !=
364 CVMX_HELPER_INTERFACE_MODE_SPI
)) {
366 u8
*ptr
= dev
->dev_addr
;
368 int index
= INDEX(priv
->port
);
370 for (i
= 0; i
< 6; i
++)
371 mac
= (mac
<< 8) | (u64
)ptr
[i
];
374 cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
375 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
376 gmx_cfg
.u64
& ~1ull);
378 cvmx_write_csr(CVMX_GMXX_SMACX(index
, interface
), mac
);
379 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index
, interface
),
381 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index
, interface
),
383 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index
, interface
),
385 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index
, interface
),
387 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index
, interface
),
389 cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index
, interface
),
391 cvm_oct_common_set_multicast_list(dev
);
392 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
),
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
415 * cvm_oct_common_init - per network device initialization
416 * @dev: Device to initialize
418 * Returns Zero on success
420 int cvm_oct_common_init(struct net_device
*dev
)
422 struct octeon_ethernet
*priv
= netdev_priv(dev
);
423 const u8
*mac
= NULL
;
426 mac
= of_get_mac_address(priv
->of_node
);
429 ether_addr_copy(dev
->dev_addr
, mac
);
431 eth_hw_addr_random(dev
);
434 * Force the interface to use the POW send if always_use_pow
435 * was specified or it is in the pow send list.
437 if ((pow_send_group
!= -1) &&
438 (always_use_pow
|| strstr(pow_send_list
, dev
->name
)))
441 if (priv
->queue
!= -1)
442 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
444 /* We do our own locking, Linux doesn't need to */
445 dev
->features
|= NETIF_F_LLTX
;
446 dev
->ethtool_ops
= &cvm_oct_ethtool_ops
;
448 cvm_oct_set_mac_filter(dev
);
449 dev_set_mtu(dev
, dev
->mtu
);
452 * Zero out stats for port so we won't mistakenly show
453 * counters from the bootloader.
455 memset(dev
->netdev_ops
->ndo_get_stats(dev
), 0,
456 sizeof(struct net_device_stats
));
458 if (dev
->netdev_ops
->ndo_stop
)
459 dev
->netdev_ops
->ndo_stop(dev
);
464 void cvm_oct_common_uninit(struct net_device
*dev
)
467 phy_disconnect(dev
->phydev
);
470 int cvm_oct_common_open(struct net_device
*dev
,
471 void (*link_poll
)(struct net_device
*))
473 union cvmx_gmxx_prtx_cfg gmx_cfg
;
474 struct octeon_ethernet
*priv
= netdev_priv(dev
);
475 int interface
= INTERFACE(priv
->port
);
476 int index
= INDEX(priv
->port
);
477 cvmx_helper_link_info_t link_info
;
480 rv
= cvm_oct_phy_setup_device(dev
);
484 gmx_cfg
.u64
= cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index
, interface
));
486 if (octeon_has_feature(OCTEON_FEATURE_PKND
))
487 gmx_cfg
.s
.pknd
= priv
->port
;
488 cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index
, interface
), gmx_cfg
.u64
);
490 if (octeon_is_simulation())
494 int r
= phy_read_status(dev
->phydev
);
496 if (r
== 0 && dev
->phydev
->link
== 0)
497 netif_carrier_off(dev
);
498 cvm_oct_adjust_link(dev
);
500 link_info
= cvmx_helper_link_get(priv
->port
);
501 if (!link_info
.s
.link_up
)
502 netif_carrier_off(dev
);
503 priv
->poll
= link_poll
;
510 void cvm_oct_link_poll(struct net_device
*dev
)
512 struct octeon_ethernet
*priv
= netdev_priv(dev
);
513 cvmx_helper_link_info_t link_info
;
515 link_info
= cvmx_helper_link_get(priv
->port
);
516 if (link_info
.u64
== priv
->link_info
)
519 if (cvmx_helper_link_set(priv
->port
, link_info
))
520 link_info
.u64
= priv
->link_info
;
522 priv
->link_info
= link_info
.u64
;
524 if (link_info
.s
.link_up
) {
525 if (!netif_carrier_ok(dev
))
526 netif_carrier_on(dev
);
527 } else if (netif_carrier_ok(dev
)) {
528 netif_carrier_off(dev
);
530 cvm_oct_note_carrier(priv
, link_info
);
533 static int cvm_oct_xaui_open(struct net_device
*dev
)
535 return cvm_oct_common_open(dev
, cvm_oct_link_poll
);
538 static const struct net_device_ops cvm_oct_npi_netdev_ops
= {
539 .ndo_init
= cvm_oct_common_init
,
540 .ndo_uninit
= cvm_oct_common_uninit
,
541 .ndo_start_xmit
= cvm_oct_xmit
,
542 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
543 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
544 .ndo_do_ioctl
= cvm_oct_ioctl
,
545 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
546 .ndo_get_stats
= cvm_oct_common_get_stats
,
547 #ifdef CONFIG_NET_POLL_CONTROLLER
548 .ndo_poll_controller
= cvm_oct_poll_controller
,
552 static const struct net_device_ops cvm_oct_xaui_netdev_ops
= {
553 .ndo_init
= cvm_oct_common_init
,
554 .ndo_uninit
= cvm_oct_common_uninit
,
555 .ndo_open
= cvm_oct_xaui_open
,
556 .ndo_stop
= cvm_oct_common_stop
,
557 .ndo_start_xmit
= cvm_oct_xmit
,
558 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
559 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
560 .ndo_do_ioctl
= cvm_oct_ioctl
,
561 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
562 .ndo_get_stats
= cvm_oct_common_get_stats
,
563 #ifdef CONFIG_NET_POLL_CONTROLLER
564 .ndo_poll_controller
= cvm_oct_poll_controller
,
568 static const struct net_device_ops cvm_oct_sgmii_netdev_ops
= {
569 .ndo_init
= cvm_oct_sgmii_init
,
570 .ndo_uninit
= cvm_oct_common_uninit
,
571 .ndo_open
= cvm_oct_sgmii_open
,
572 .ndo_stop
= cvm_oct_common_stop
,
573 .ndo_start_xmit
= cvm_oct_xmit
,
574 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
575 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
576 .ndo_do_ioctl
= cvm_oct_ioctl
,
577 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
578 .ndo_get_stats
= cvm_oct_common_get_stats
,
579 #ifdef CONFIG_NET_POLL_CONTROLLER
580 .ndo_poll_controller
= cvm_oct_poll_controller
,
584 static const struct net_device_ops cvm_oct_spi_netdev_ops
= {
585 .ndo_init
= cvm_oct_spi_init
,
586 .ndo_uninit
= cvm_oct_spi_uninit
,
587 .ndo_start_xmit
= cvm_oct_xmit
,
588 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
589 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
590 .ndo_do_ioctl
= cvm_oct_ioctl
,
591 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
592 .ndo_get_stats
= cvm_oct_common_get_stats
,
593 #ifdef CONFIG_NET_POLL_CONTROLLER
594 .ndo_poll_controller
= cvm_oct_poll_controller
,
598 static const struct net_device_ops cvm_oct_rgmii_netdev_ops
= {
599 .ndo_init
= cvm_oct_common_init
,
600 .ndo_uninit
= cvm_oct_common_uninit
,
601 .ndo_open
= cvm_oct_rgmii_open
,
602 .ndo_stop
= cvm_oct_common_stop
,
603 .ndo_start_xmit
= cvm_oct_xmit
,
604 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
605 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
606 .ndo_do_ioctl
= cvm_oct_ioctl
,
607 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
608 .ndo_get_stats
= cvm_oct_common_get_stats
,
609 #ifdef CONFIG_NET_POLL_CONTROLLER
610 .ndo_poll_controller
= cvm_oct_poll_controller
,
614 static const struct net_device_ops cvm_oct_pow_netdev_ops
= {
615 .ndo_init
= cvm_oct_common_init
,
616 .ndo_start_xmit
= cvm_oct_xmit_pow
,
617 .ndo_set_rx_mode
= cvm_oct_common_set_multicast_list
,
618 .ndo_set_mac_address
= cvm_oct_common_set_mac_address
,
619 .ndo_do_ioctl
= cvm_oct_ioctl
,
620 .ndo_change_mtu
= cvm_oct_common_change_mtu
,
621 .ndo_get_stats
= cvm_oct_common_get_stats
,
622 #ifdef CONFIG_NET_POLL_CONTROLLER
623 .ndo_poll_controller
= cvm_oct_poll_controller
,
627 static struct device_node
*cvm_oct_of_get_child(
628 const struct device_node
*parent
, int reg_val
)
630 struct device_node
*node
= NULL
;
635 node
= of_get_next_child(parent
, node
);
638 addr
= of_get_property(node
, "reg", &size
);
639 if (addr
&& (be32_to_cpu(*addr
) == reg_val
))
645 static struct device_node
*cvm_oct_node_for_port(struct device_node
*pip
,
646 int interface
, int port
)
648 struct device_node
*ni
, *np
;
650 ni
= cvm_oct_of_get_child(pip
, interface
);
654 np
= cvm_oct_of_get_child(ni
, port
);
660 static void cvm_set_rgmii_delay(struct device_node
*np
, int iface
, int port
)
664 if (!of_property_read_u32(np
, "rx-delay", &delay_value
))
665 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port
, iface
), delay_value
);
666 if (!of_property_read_u32(np
, "tx-delay", &delay_value
))
667 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port
, iface
), delay_value
);
670 static int cvm_oct_probe(struct platform_device
*pdev
)
674 int fau
= FAU_NUM_PACKET_BUFFERS_TO_FREE
;
676 struct device_node
*pip
;
677 int mtu_overhead
= ETH_HLEN
+ ETH_FCS_LEN
;
679 #if IS_ENABLED(CONFIG_VLAN_8021Q)
680 mtu_overhead
+= VLAN_HLEN
;
683 octeon_mdiobus_force_mod_depencency();
685 pip
= pdev
->dev
.of_node
;
687 pr_err("Error: No 'pip' in /aliases\n");
691 cvm_oct_configure_common_hw();
693 cvmx_helper_initialize_packet_io_global();
695 if (receive_group_order
) {
696 if (receive_group_order
> 4)
697 receive_group_order
= 4;
698 pow_receive_groups
= (1 << (1 << receive_group_order
)) - 1;
700 pow_receive_groups
= BIT(pow_receive_group
);
703 /* Change the input group for all ports before input is enabled */
704 num_interfaces
= cvmx_helper_get_number_of_interfaces();
705 for (interface
= 0; interface
< num_interfaces
; interface
++) {
706 int num_ports
= cvmx_helper_ports_on_interface(interface
);
709 for (port
= cvmx_helper_get_ipd_port(interface
, 0);
710 port
< cvmx_helper_get_ipd_port(interface
, num_ports
);
712 union cvmx_pip_prt_tagx pip_prt_tagx
;
715 cvmx_read_csr(CVMX_PIP_PRT_TAGX(port
));
717 if (receive_group_order
) {
720 /* We support only 16 groups at the moment, so
721 * always disable the two additional "hidden"
722 * tag_mask bits on CN68XX.
724 if (OCTEON_IS_MODEL(OCTEON_CN68XX
))
725 pip_prt_tagx
.u64
|= 0x3ull
<< 44;
727 tag_mask
= ~((1 << receive_group_order
) - 1);
728 pip_prt_tagx
.s
.grptagbase
= 0;
729 pip_prt_tagx
.s
.grptagmask
= tag_mask
;
730 pip_prt_tagx
.s
.grptag
= 1;
731 pip_prt_tagx
.s
.tag_mode
= 0;
732 pip_prt_tagx
.s
.inc_prt_flag
= 1;
733 pip_prt_tagx
.s
.ip6_dprt_flag
= 1;
734 pip_prt_tagx
.s
.ip4_dprt_flag
= 1;
735 pip_prt_tagx
.s
.ip6_sprt_flag
= 1;
736 pip_prt_tagx
.s
.ip4_sprt_flag
= 1;
737 pip_prt_tagx
.s
.ip6_dst_flag
= 1;
738 pip_prt_tagx
.s
.ip4_dst_flag
= 1;
739 pip_prt_tagx
.s
.ip6_src_flag
= 1;
740 pip_prt_tagx
.s
.ip4_src_flag
= 1;
741 pip_prt_tagx
.s
.grp
= 0;
743 pip_prt_tagx
.s
.grptag
= 0;
744 pip_prt_tagx
.s
.grp
= pow_receive_group
;
747 cvmx_write_csr(CVMX_PIP_PRT_TAGX(port
),
752 cvmx_helper_ipd_and_packet_input_enable();
754 memset(cvm_oct_device
, 0, sizeof(cvm_oct_device
));
757 * Initialize the FAU used for counting packet buffers that
760 cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE
, 0);
762 /* Initialize the FAU used for counting tx SKBs that need to be freed */
763 cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN
, 0);
765 if ((pow_send_group
!= -1)) {
766 struct net_device
*dev
;
768 dev
= alloc_etherdev(sizeof(struct octeon_ethernet
));
770 /* Initialize the device private structure. */
771 struct octeon_ethernet
*priv
= netdev_priv(dev
);
773 SET_NETDEV_DEV(dev
, &pdev
->dev
);
774 dev
->netdev_ops
= &cvm_oct_pow_netdev_ops
;
775 priv
->imode
= CVMX_HELPER_INTERFACE_MODE_DISABLED
;
776 priv
->port
= CVMX_PIP_NUM_INPUT_PORTS
;
778 strcpy(dev
->name
, "pow%d");
779 for (qos
= 0; qos
< 16; qos
++)
780 skb_queue_head_init(&priv
->tx_free_list
[qos
]);
781 dev
->min_mtu
= VLAN_ETH_ZLEN
- mtu_overhead
;
782 dev
->max_mtu
= OCTEON_MAX_MTU
- mtu_overhead
;
784 if (register_netdev(dev
) < 0) {
785 pr_err("Failed to register ethernet device for POW\n");
788 cvm_oct_device
[CVMX_PIP_NUM_INPUT_PORTS
] = dev
;
789 pr_info("%s: POW send group %d, receive group %d\n",
790 dev
->name
, pow_send_group
,
794 pr_err("Failed to allocate ethernet device for POW\n");
798 num_interfaces
= cvmx_helper_get_number_of_interfaces();
799 for (interface
= 0; interface
< num_interfaces
; interface
++) {
800 cvmx_helper_interface_mode_t imode
=
801 cvmx_helper_interface_get_mode(interface
);
802 int num_ports
= cvmx_helper_ports_on_interface(interface
);
807 port
= cvmx_helper_get_ipd_port(interface
, 0);
808 port
< cvmx_helper_get_ipd_port(interface
, num_ports
);
809 port_index
++, port
++) {
810 struct octeon_ethernet
*priv
;
811 struct net_device
*dev
=
812 alloc_etherdev(sizeof(struct octeon_ethernet
));
814 pr_err("Failed to allocate ethernet device for port %d\n",
819 /* Initialize the device private structure. */
820 SET_NETDEV_DEV(dev
, &pdev
->dev
);
821 priv
= netdev_priv(dev
);
823 priv
->of_node
= cvm_oct_node_for_port(pip
, interface
,
826 INIT_DELAYED_WORK(&priv
->port_periodic_work
,
827 cvm_oct_periodic_worker
);
830 priv
->queue
= cvmx_pko_get_base_queue(priv
->port
);
831 priv
->fau
= fau
- cvmx_pko_get_num_queues(port
) * 4;
832 for (qos
= 0; qos
< 16; qos
++)
833 skb_queue_head_init(&priv
->tx_free_list
[qos
]);
834 for (qos
= 0; qos
< cvmx_pko_get_num_queues(port
);
836 cvmx_fau_atomic_write32(priv
->fau
+ qos
* 4, 0);
837 dev
->min_mtu
= VLAN_ETH_ZLEN
- mtu_overhead
;
838 dev
->max_mtu
= OCTEON_MAX_MTU
- mtu_overhead
;
840 switch (priv
->imode
) {
841 /* These types don't support ports to IPD/PKO */
842 case CVMX_HELPER_INTERFACE_MODE_DISABLED
:
843 case CVMX_HELPER_INTERFACE_MODE_PCIE
:
844 case CVMX_HELPER_INTERFACE_MODE_PICMG
:
847 case CVMX_HELPER_INTERFACE_MODE_NPI
:
848 dev
->netdev_ops
= &cvm_oct_npi_netdev_ops
;
849 strcpy(dev
->name
, "npi%d");
852 case CVMX_HELPER_INTERFACE_MODE_XAUI
:
853 dev
->netdev_ops
= &cvm_oct_xaui_netdev_ops
;
854 strcpy(dev
->name
, "xaui%d");
857 case CVMX_HELPER_INTERFACE_MODE_LOOP
:
858 dev
->netdev_ops
= &cvm_oct_npi_netdev_ops
;
859 strcpy(dev
->name
, "loop%d");
862 case CVMX_HELPER_INTERFACE_MODE_SGMII
:
863 dev
->netdev_ops
= &cvm_oct_sgmii_netdev_ops
;
864 strcpy(dev
->name
, "eth%d");
867 case CVMX_HELPER_INTERFACE_MODE_SPI
:
868 dev
->netdev_ops
= &cvm_oct_spi_netdev_ops
;
869 strcpy(dev
->name
, "spi%d");
872 case CVMX_HELPER_INTERFACE_MODE_RGMII
:
873 case CVMX_HELPER_INTERFACE_MODE_GMII
:
874 dev
->netdev_ops
= &cvm_oct_rgmii_netdev_ops
;
875 strcpy(dev
->name
, "eth%d");
876 cvm_set_rgmii_delay(priv
->of_node
, interface
,
881 if (!dev
->netdev_ops
) {
883 } else if (register_netdev(dev
) < 0) {
884 pr_err("Failed to register ethernet device for interface %d, port %d\n",
885 interface
, priv
->port
);
888 cvm_oct_device
[priv
->port
] = dev
;
890 cvmx_pko_get_num_queues(priv
->port
) *
892 schedule_delayed_work(&priv
->port_periodic_work
,
898 cvm_oct_tx_initialize();
899 cvm_oct_rx_initialize();
902 * 150 uS: about 10 1500-byte packets at 1GE.
904 cvm_oct_tx_poll_interval
= 150 * (octeon_get_clock_rate() / 1000000);
906 schedule_delayed_work(&cvm_oct_rx_refill_work
, HZ
);
911 static int cvm_oct_remove(struct platform_device
*pdev
)
917 atomic_inc_return(&cvm_oct_poll_queue_stopping
);
918 cancel_delayed_work_sync(&cvm_oct_rx_refill_work
);
920 cvm_oct_rx_shutdown();
921 cvm_oct_tx_shutdown();
925 /* Free the ethernet devices */
926 for (port
= 0; port
< TOTAL_NUMBER_OF_PORTS
; port
++) {
927 if (cvm_oct_device
[port
]) {
928 struct net_device
*dev
= cvm_oct_device
[port
];
929 struct octeon_ethernet
*priv
= netdev_priv(dev
);
931 cancel_delayed_work_sync(&priv
->port_periodic_work
);
933 cvm_oct_tx_shutdown_dev(dev
);
934 unregister_netdev(dev
);
936 cvm_oct_device
[port
] = NULL
;
944 /* Free the HW pools */
945 cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL
, CVMX_FPA_PACKET_POOL_SIZE
,
947 cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL
, CVMX_FPA_WQE_POOL_SIZE
,
949 if (CVMX_FPA_OUTPUT_BUFFER_POOL
!= CVMX_FPA_PACKET_POOL
)
950 cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL
,
951 CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
, 128);
955 static const struct of_device_id cvm_oct_match
[] = {
957 .compatible
= "cavium,octeon-3860-pip",
961 MODULE_DEVICE_TABLE(of
, cvm_oct_match
);
963 static struct platform_driver cvm_oct_driver
= {
964 .probe
= cvm_oct_probe
,
965 .remove
= cvm_oct_remove
,
967 .name
= KBUILD_MODNAME
,
968 .of_match_table
= cvm_oct_match
,
972 module_platform_driver(cvm_oct_driver
);
974 MODULE_LICENSE("GPL");
975 MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
976 MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");