// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

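/*
 * Example: receive_group_order=2 takes 1 << 2 = 4 groups into use, so
 * cvm_oct_probe() computes pow_receive_groups = (1 << 4) - 1 = 0xf
 * (groups 0-3).
 */
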
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
						(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	struct cvmx_wqe *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

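	/*
	 * The hardware stores the pointer to the next buffer of a chain
	 * 8 bytes before the start of the current packet data, which is
	 * why next_ptr below is loaded from segment_ptr.s.addr - 8.
	 */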
	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

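		/*
		 * The second argument (1) asks the hardware to clear
		 * its counters on read, so each read returns a delta
		 * that is accumulated into the netdev stats below.
		 */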
		dev->stats.rx_packets += rx_status.inb_packets;
		dev->stats.tx_packets += tx_status.packets;
		dev->stats.rx_bytes += rx_status.inb_octets;
		dev->stats.tx_bytes += tx_status.octets;
		dev->stats.multicast += rx_status.multicast_packets;
		dev->stats.rx_crc_errors += rx_status.inb_errors;
		dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		dev->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

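		/*
		 * For example, new_mtu = 1500 with CONFIG_VLAN_8021Q
		 * gives max_packet = 1500 + 14 (header) + 4 (FCS) +
		 * 4 (VLAN tag) = 1522 bytes on the wire.
		 */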
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

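		/*
		 * Bit 0 of GMXX_PRTX_CFG is the port enable; the port
		 * is held disabled (u64 & ~1ull) while the RX address
		 * control is rewritten, then the saved config restores
		 * it below.
		 */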
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		const u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

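		/*
		 * The six address octets are packed MSB-first into one
		 * 64-bit value for the SMAC register (the source MAC
		 * used for transmitted pause frames); the CAM match
		 * registers below take one octet each.
		 */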
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int ret;

	ret = of_get_ethdev_address(priv->of_node, dev);
	if (ret)
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev_set_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

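	/* Leave the interface down until it is explicitly opened. */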
	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	union cvmx_helper_link_info link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_helper_link_info link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_eth_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child
				(const struct device_node *parent, int reg_val)
{
	struct device_node *node;
	const __be32 *addr;
	int size;

	for_each_child_of_node(parent, node) {
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
				int port)
{
	struct device_node *np = priv->of_node;
	u32 delay_value;
	bool rx_delay;
	bool tx_delay;

	/* By default, both RX/TX delay is enabled in
	 * __cvmx_helper_rgmii_enable().
	 */
	rx_delay = true;
	tx_delay = true;

	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
		rx_delay = delay_value > 0;
	}
	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
		tx_delay = delay_value > 0;
	}

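	/*
	 * If the MAC-side delay is disabled for a direction, the PHY
	 * must insert it instead; hence the internal-delay (*_ID) phy
	 * modes below are chosen with the inverted sense.
	 */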
	if (!rx_delay && !tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
	else if (!rx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
	else if (!tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
	else
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

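			/*
			 * With multiple receive groups enabled, PIP
			 * hashes the IP addresses and L4 ports selected
			 * below into a tag; the low tag bits kept by
			 * grptagmask then pick the receive group,
			 * spreading flows across groups.
			 */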
			if (receive_group_order) {
				int tag_mask;

				/* We support only 16 groups at the moment, so
				 * always disable the two additional "hidden"
				 * tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase	= 0;
				pip_prt_tagx.s.grptagmask	= tag_mask;
				pip_prt_tagx.s.grptag		= 1;
				pip_prt_tagx.s.tag_mode		= 0;
				pip_prt_tagx.s.inc_prt_flag	= 1;
				pip_prt_tagx.s.ip6_dprt_flag	= 1;
				pip_prt_tagx.s.ip4_dprt_flag	= 1;
				pip_prt_tagx.s.ip6_sprt_flag	= 1;
				pip_prt_tagx.s.ip4_sprt_flag	= 1;
				pip_prt_tagx.s.ip6_dst_flag	= 1;
				pip_prt_tagx.s.ip4_dst_flag	= 1;
				pip_prt_tagx.s.ip6_src_flag	= 1;
				pip_prt_tagx.s.ip4_src_flag	= 1;
				pip_prt_tagx.s.grp		= 0;
			} else {
				pip_prt_tagx.s.grptag	= 0;
				pip_prt_tagx.s.grp	= pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strscpy(dev->name, "pow%d", sizeof(dev->name));
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			priv->phy_mode = PHY_INTERFACE_MODE_NA;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

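			/*
			 * One 32-bit FAU counter per PKO queue is carved
			 * out below FAU_NUM_PACKET_BUFFERS_TO_FREE; fau
			 * itself is moved down only after the device
			 * registers successfully below.
			 */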
			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "npi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strscpy(dev->name, "xaui%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "loop%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strscpy(dev->name, "spi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				cvm_set_rgmii_delay(priv, interface,
						    port_index);
				break;
			}

			if (priv->of_node && of_phy_is_fixed_link(priv->of_node)) {
				if (of_phy_register_fixed_link(priv->of_node)) {
					netdev_err(dev, "Failed to register fixed link for interface %d, port %d\n",
						   interface, priv->port);
					dev->netdev_ops = NULL;
				}
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
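	/*
	 * octeon_get_clock_rate() / 1000000 is core cycles per
	 * microsecond, so the value below is 150 us expressed in core
	 * clock cycles.
	 */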
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

static void cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove_new	= cvm_oct_remove,
	.driver		= {
		.name		= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");