// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/* Mask indicating which receive groups are in use. */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it. If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}
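
/*
 * Note: the refill worker above is only a safety net. In steady state
 * the NAPI poll loop refills the pool as it consumes packets; the
 * once-per-second reschedule only matters if the pool drained
 * completely and receive work therefore stopped arriving.
 */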

static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
                                        (cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
        {
                union cvmx_ipd_ctl_status ipd_ctl_status;

                ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
                ipd_ctl_status.s.pkt_lend = 1;
                ipd_ctl_status.s.wqe_lend = 1;
                cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
        }
#endif

        cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
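
/*
 * Note: a hedged reading of the RED (Random Early Discard) setup above:
 * packets pass while more than num_packet_buffers / 4 buffers remain
 * free, are all dropped below num_packet_buffers / 8 free, and are
 * dropped with increasing probability in between.
 */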

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        struct cvmx_wqe *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      CVMX_FPA_PACKET_POOL_SIZE / 128);
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
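
/*
 * Note on the loop in cvm_oct_free_work() above: the hardware stores
 * the pointer to the next buffer in the 8 bytes immediately before
 * each segment's data, which is why the next pointer is read from
 * segment_ptr.s.addr - 8. The I bit in the buffer pointer appears to
 * mark segments that must not be returned to the FPA here.
 */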

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

                dev->stats.rx_packets += rx_status.inb_packets;
                dev->stats.tx_packets += tx_status.packets;
                dev->stats.rx_bytes += rx_status.inb_octets;
                dev->stats.tx_bytes += tx_status.octets;
                dev->stats.multicast += rx_status.multicast_packets;
                dev->stats.rx_crc_errors += rx_status.inb_errors;
                dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
                dev->stats.rx_dropped += rx_status.dropped_packets;
        }

        return &dev->stats;
}
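
/*
 * Note: in cvm_oct_common_get_stats() above, the "1" passed to the
 * status reads asks the hardware to clear its counters on read, so
 * each call returns deltas and the driver accumulates them into
 * dev->stats with +=.
 */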

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
        int vlan_bytes = VLAN_HLEN;
#else
        int vlan_bytes = 0;
#endif
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

        dev->mtu = new_mtu;

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int index = INDEX(priv->port);
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + mtu_overhead;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
                    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller the 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}
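
/*
 * Note: the GMX jabber check only accepts lengths that are multiples
 * of 8, hence the (max_packet + 7) & ~7u round-up in
 * cvm_oct_common_change_mtu() above; rounding up errs on the side of
 * letting a slightly oversized frame through rather than truncating
 * one that is exactly MTU-sized.
 */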

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;
                int index = INDEX(priv->port);

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

                gmx_cfg.u64 =
                        cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}
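
/*
 * Note: cvm_oct_common_set_multicast_list() above follows the usual
 * GMX recipe: clear the port enable bit (bit 0 of the port config CSR)
 * before touching the address filter, then write back the saved
 * configuration so the port only comes back up if it was up to begin
 * with.
 */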

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);

        if ((interface < 2) &&
            (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                u8 *ptr = dev->dev_addr;
                u64 mac = 0;
                int index = INDEX(priv->port);

                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (u64)ptr[i];

                gmx_cfg.u64 =
                        cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}
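
/*
 * Note: the same six address bytes are programmed twice above: packed
 * big-endian into a single u64 for GMX_SMAC (apparently used as the
 * source MAC of hardware-generated pause frames) and byte-by-byte into
 * ADR_CAM0..5 for receive filtering.
 */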

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (!IS_ERR_OR_NULL(mac))
                ether_addr_copy(dev->dev_addr, mac);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1) &&
            (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1)
                dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_set_mac_filter(dev);
        dev_set_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        if (dev->netdev_ops->ndo_stop)
                dev->netdev_ops->ndo_stop(dev);

        return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
        if (dev->phydev)
                phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
                        void (*link_poll)(struct net_device *))
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
        union cvmx_helper_link_info link_info;
        int rv;

        rv = cvm_oct_phy_setup_device(dev);
        if (rv)
                return rv;

        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmx_cfg.s.en = 1;
        if (octeon_has_feature(OCTEON_FEATURE_PKND))
                gmx_cfg.s.pknd = priv->port;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

        if (octeon_is_simulation())
                return 0;

        if (dev->phydev) {
                int r = phy_read_status(dev->phydev);

                if (r == 0 && dev->phydev->link == 0)
                        netif_carrier_off(dev);
                cvm_oct_adjust_link(dev);
        } else {
                link_info = cvmx_helper_link_get(priv->port);
                if (!link_info.s.link_up)
                        netif_carrier_off(dev);
                priv->poll = link_poll;
                link_poll(dev);
        }

        return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_helper_link_info link_info;

        link_info = cvmx_helper_link_get(priv->port);
        if (link_info.u64 == priv->link_info)
                return;

        if (cvmx_helper_link_set(priv->port, link_info))
                link_info.u64 = priv->link_info;
        else
                priv->link_info = link_info.u64;

        if (link_info.s.link_up) {
                if (!netif_carrier_ok(dev))
                        netif_carrier_on(dev);
        } else if (netif_carrier_ok(dev)) {
                netif_carrier_off(dev);
        }
        cvm_oct_note_carrier(priv, link_info);
}
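
/*
 * Note: cvm_oct_link_poll() above caches the last link_info in priv
 * and returns early when nothing changed, so the periodic worker
 * normally costs one link query per second and reprograms the MAC
 * only on an actual link transition.
 */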

static int cvm_oct_xaui_open(struct net_device *dev)
{
        return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_open = cvm_oct_xaui_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init = cvm_oct_sgmii_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_open = cvm_oct_sgmii_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init = cvm_oct_spi_init,
        .ndo_uninit = cvm_oct_spi_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_open = cvm_oct_rgmii_open,
        .ndo_stop = cvm_oct_common_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_start_xmit = cvm_oct_xmit_pow,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child
                                (const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;

                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
                                int port)
{
        struct device_node *np = priv->of_node;
        u32 delay_value;
        bool rx_delay;
        bool tx_delay;

        /* By default, both RX/TX delay is enabled in
         * __cvmx_helper_rgmii_enable().
         */
        rx_delay = true;
        tx_delay = true;

        if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
                rx_delay = delay_value > 0;
        }
        if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
                tx_delay = delay_value > 0;
        }

        if (!rx_delay && !tx_delay)
                priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
        else if (!rx_delay)
                priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
        else if (!tx_delay)
                priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
        else
                priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}
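
/*
 * Note: the mapping in cvm_set_rgmii_delay() above encodes who adds
 * the RGMII clock skew. When the MAC stops applying a delay on a
 * direction (delay value 0), the PHY must add it instead, so e.g. no
 * MAC delays at all selects PHY_INTERFACE_MODE_RGMII_ID (PHY delays
 * both directions).
 */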

static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;
        int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
        mtu_overhead += VLAN_HLEN;
#endif

        octeon_mdiobus_force_mod_depencency();

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        if (receive_group_order) {
                if (receive_group_order > 4)
                        receive_group_order = 4;
                pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
        } else {
                pow_receive_groups = BIT(pow_receive_group);
        }

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                                cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

                        if (receive_group_order) {
                                int tag_mask;

                                /* We support only 16 groups at the moment, so
                                 * always disable the two additional "hidden"
                                 * tag_mask bits on CN68XX.
                                 */
                                if (OCTEON_IS_MODEL(OCTEON_CN68XX))
                                        pip_prt_tagx.u64 |= 0x3ull << 44;

                                tag_mask = ~((1 << receive_group_order) - 1);
                                pip_prt_tagx.s.grptagbase = 0;
                                pip_prt_tagx.s.grptagmask = tag_mask;
                                pip_prt_tagx.s.grptag = 1;
                                pip_prt_tagx.s.tag_mode = 0;
                                pip_prt_tagx.s.inc_prt_flag = 1;
                                pip_prt_tagx.s.ip6_dprt_flag = 1;
                                pip_prt_tagx.s.ip4_dprt_flag = 1;
                                pip_prt_tagx.s.ip6_sprt_flag = 1;
                                pip_prt_tagx.s.ip4_sprt_flag = 1;
                                pip_prt_tagx.s.ip6_dst_flag = 1;
                                pip_prt_tagx.s.ip4_dst_flag = 1;
                                pip_prt_tagx.s.ip6_src_flag = 1;
                                pip_prt_tagx.s.ip4_src_flag = 1;
                                pip_prt_tagx.s.grp = 0;
                        } else {
                                pip_prt_tagx.s.grptag = 0;
                                pip_prt_tagx.s.grp = pow_receive_group;
                        }

                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }
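
        /*
         * Note: with multiple groups enabled, the flag bits set above make
         * PIP hash ports and IP addresses into the group tag, so traffic is
         * spread across the 2^receive_group_order groups (and the cores
         * servicing them) while packets of one flow keep landing in the
         * same group.
         */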

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        SET_NETDEV_DEV(dev, &pdev->dev);
                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strscpy(dev->name, "pow%d", sizeof(dev->name));
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                        cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                                alloc_etherdev(sizeof(struct octeon_ethernet));
                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        SET_NETDEV_DEV(dev, &pdev->dev);
                        priv = netdev_priv(dev);
                        priv->netdev = dev;
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        priv->phy_mode = PHY_INTERFACE_MODE_NA;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
                        dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
                        dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;
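
                        /*
                         * Note: each PKO queue gets its own 4-byte FAU
                         * counter; priv->fau points at the first one for
                         * this port, which is why the running fau value
                         * walks by num_queues * 4 as ports are set up.
                         */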

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strscpy(dev->name, "npi%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strscpy(dev->name, "xaui%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strscpy(dev->name, "loop%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strscpy(dev->name, "eth%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strscpy(dev->name, "spi%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                priv->phy_mode = PHY_INTERFACE_MODE_GMII;
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strscpy(dev->name, "eth%d", sizeof(dev->name));
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strscpy(dev->name, "eth%d", sizeof(dev->name));
                                cvm_set_rgmii_delay(priv, interface,
                                                    port_index);
                                break;
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                        cvmx_pko_get_num_queues(priv->port) *
                                        sizeof(u32);
                                schedule_delayed_work(&priv->port_periodic_work,
                                                      HZ);
                        }
                }
        }

        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 uS: about 10 1500-byte packets at 1GE.
         */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

        return 0;
}
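
/*
 * Note on cvm_oct_probe() above: octeon_get_clock_rate() / 1000000 is
 * the core clock in cycles per microsecond, so the TX poll interval is
 * 150 us expressed in clock cycles, matching the "about 10 1500-byte
 * packets at 1GE" comment.
 */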

static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        cvmx_ipd_disable();

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
        return 0;
}

static const struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe = cvm_oct_probe,
        .remove = cvm_oct_remove,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");