// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV | \
			 NETIF_MSG_LINK | \
			 NETIF_MSG_RX_ERR | \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;	/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)
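
/*
 * Every rionet node's MAC address encodes its RapidIO destination ID in
 * the last two octets: 00:01:00:01:<destid 15:8>:<destid 7:0>.  For
 * example, destid 0x0005 appears as 00:01:00:01:00:05.  The helpers
 * below match and extract that encoding.
 */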
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))
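
/*
 * Drain the inbound mailbox: hand each received buffer in the rx ring to
 * the stack via netif_rx() and update the rx statistics.  Returns the ring
 * index of the first slot that still needs a fresh buffer.
 */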
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	void *data;
	struct rionet_private *rnet = netdev_priv(ndev);
	int error = 0;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}
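
/*
 * Refill the rx ring from the current slot up to (but not including) @end:
 * allocate a fresh skb for each slot and post its data buffer to the
 * inbound mailbox.
 */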
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}
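
/*
 * Queue one skb on the outbound mailbox for a single destination and
 * account for it in the tx ring; the slot is reclaimed from the outbound
 * message event callback once the transfer completes.
 */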
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}
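
/*
 * Transmit path.  Multicast/broadcast frames are replicated to every peer
 * currently marked active on this mport; unicast frames go to the single
 * peer whose destination ID is encoded in the destination MAC address.
 */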
static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
				i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					refcount_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid]) {
			rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[destid]);
		} else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it just report sending a packet to the target
			 * (without actual packet transfer).
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}
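
/*
 * Inbound doorbell handler: a JOIN doorbell marks the sending peer active
 * (and is answered with a JOIN), a LEAVE doorbell drops it from the active
 * table.
 */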
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}
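
/*
 * ndo_open: claim the doorbell range and the inbound/outbound mailboxes,
 * prime the rx ring, then doorbell every known peer so it marks this node
 * active.
 */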
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

out:
	return rc;
}
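
/*
 * ndo_stop: notify active peers with a LEAVE doorbell, free the rx buffers
 * and release the doorbell and mailbox resources owned by this interface.
 */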
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}
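
/*
 * subsys remove_dev callback: unlink a departing RapidIO device from the
 * peer list and, if it was active and not already gone, send it a LEAVE
 * doorbell before forgetting it.
 */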
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING)
					rio_send_doorbell(rdev,
							RIONET_DOORBELL_LEAVE);
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open		= rionet_open,
	.ndo_stop		= rionet_close,
	.ndo_start_xmit		= rionet_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
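
/*
 * Allocate the per-mport table of active peers, derive the MAC address
 * from the local RapidIO destination ID and register the net_device.
 */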
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	ndev->dev_addr[0] = 0x00;
	ndev->dev_addr[1] = 0x01;
	ndev->dev_addr[2] = 0x00;
	ndev->dev_addr[3] = 0x01;
	ndev->dev_addr[4] = device_id >> 8;
	ndev->dev_addr[5] = device_id & 0xff;

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	/* MTU range: 68 - 4082 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = RIONET_MAX_MTU;
	ndev->features = NETIF_F_LLTX;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name, mport->name, DRV_NAME, DRV_VERSION,
	       ndev->dev_addr, mport->name);

out:
	return rc;
}
static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (!ndev) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						RIONET_DOORBELL_JOIN,
						RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}
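
/*
 * Reboot notifier: send a LEAVE doorbell to every active peer on every net
 * so remote nodes drop us from their active tables before we go down.
 */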
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

static void rionet_remove_mport(struct device *dev,
				struct class_interface *class_intf)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
			   RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);

static struct subsys_interface rionet_interface = {
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.remove_dev = rionet_remove_mport,
};

static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);