// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/reboot.h>

#define DRV_NAME	"rionet"
#define DRV_VERSION	"0.3"
#define DRV_AUTHOR	"Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC	"Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
			(NETIF_MSG_DRV		| \
			 NETIF_MSG_LINK		| \
			 NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN	0x1000
#define RIONET_DOORBELL_LEAVE	0x1001

#define RIONET_MAILBOX		0

#define RIONET_TX_RING_SIZE	CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE	CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS		8
#define RIONET_MSG_SIZE		RIO_MAX_MSG_SIZE
#define RIONET_MAX_MTU		(RIONET_MSG_SIZE - ETH_HLEN)

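/*
 * A single RapidIO message carries at most RIO_MAX_MSG_SIZE (4096) bytes,
 * so after the 14-byte Ethernet header the largest usable MTU is
 * 4096 - 14 = 4082 bytes, i.e. the "68 - 4082" range advertised to the
 * stack in rionet_setup_netdev().
 */
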
struct rionet_private {
	struct rio_mport *mport;
	struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
	struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
	int rx_slot;
	int tx_slot;
	int tx_cnt;
	int ack_slot;
	spinlock_t lock;
	spinlock_t tx_lock;
	u32 msg_enable;
	bool open;
};

struct rionet_peer {
	struct list_head node;
	struct rio_dev *rdev;
	struct resource *res;
};

struct rionet_net {
	struct net_device *ndev;
	struct list_head peers;
	spinlock_t lock;	/* net info access lock */
	struct rio_dev **active;
	int nact;		/* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

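/*
 * A node can take part in a rionet network only if both ends support
 * data messages and doorbells.  Peer MAC addresses encode the RapidIO
 * destination ID in their last two bytes (00:01:00:01:dd:dd), which is
 * what RIONET_MAC_MATCH() and RIONET_GET_DESTID() rely on.
 */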
#define is_rionet_capable(src_ops, dst_ops)			\
			((src_ops & RIO_SRC_OPS_DATA_MSG) &&	\
			 (dst_ops & RIO_DST_OPS_DATA_MSG) &&	\
			 (src_ops & RIO_SRC_OPS_DOORBELL) &&	\
			 (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable(dev->src_ops, dev->dst_ops)

#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

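/*
 * Drain completed inbound messages: attach each returned buffer to its
 * ring skb, pass the skb up the stack and update the RX statistics.
 * Returns the index at which cleaning stopped, which the caller uses as
 * the refill boundary for rionet_rx_fill().
 */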
static int rionet_rx_clean(struct net_device *ndev)
{
	int i;
	int error = 0;
	struct rionet_private *rnet = netdev_priv(ndev);
	void *data;

	i = rnet->rx_slot;

	do {
		if (!rnet->rx_skb[i])
			continue;

		if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
			break;

		rnet->rx_skb[i]->data = data;
		skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
		rnet->rx_skb[i]->protocol =
		    eth_type_trans(rnet->rx_skb[i], ndev);
		error = __netif_rx(rnet->rx_skb[i]);

		if (error == NET_RX_DROP) {
			ndev->stats.rx_dropped++;
		} else {
			ndev->stats.rx_packets++;
			ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
		}

	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

	return i;
}

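/*
 * Replenish the inbound ring with freshly allocated skbs, starting at the
 * current rx_slot and stopping at @end, posting each data buffer to the
 * inbound mailbox.
 */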
static void rionet_rx_fill(struct net_device *ndev, int end)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);

	i = rnet->rx_slot;
	do {
		rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

		if (!rnet->rx_skb[i])
			break;

		rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
				   rnet->rx_skb[i]->data);
	} while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

	rnet->rx_slot = i;
}

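/*
 * Post one skb to the outbound mailbox for @rdev and remember it in the TX
 * ring so it can be freed once the outbound-message event acknowledges it.
 * Called with rnet->tx_lock held.
 */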
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}

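/*
 * Transmit path: multicast/broadcast frames are replicated to every active
 * peer (taking an extra reference on the skb for each additional copy),
 * while unicast frames go to the peer whose destination ID is encoded in
 * the MAC address.
 */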
static netdev_tx_t rionet_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	int i;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	u16 destid;
	unsigned long flags;
	int add_num = 1;

	spin_lock_irqsave(&rnet->tx_lock, flags);

	if (is_multicast_ether_addr(eth->h_dest))
		add_num = nets[rnet->mport->id].nact;

	if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
		netif_stop_queue(ndev);
		spin_unlock_irqrestore(&rnet->tx_lock, flags);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}

	if (is_multicast_ether_addr(eth->h_dest)) {
		int count = 0;

		for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
		     i++)
			if (nets[rnet->mport->id].active[i]) {
				rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[i]);
				if (count)
					refcount_inc(&skb->users);
				count++;
			}
	} else if (RIONET_MAC_MATCH(eth->h_dest)) {
		destid = RIONET_GET_DESTID(eth->h_dest);
		if (nets[rnet->mport->id].active[destid])
			rionet_queue_tx_msg(skb, ndev,
					nets[rnet->mport->id].active[destid]);
		else {
			/*
			 * If the target device was removed from the list of
			 * active peers but we still have TX packets targeting
			 * it just report sending a packet to the target
			 * (without actual packet transfer).
			 */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

	spin_unlock_irqrestore(&rnet->tx_lock, flags);

	return NETDEV_TX_OK;
}

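/*
 * Inbound doorbell handler: a JOIN doorbell marks the sending peer active
 * and is answered with a JOIN of our own, a LEAVE doorbell removes the
 * peer from the active table.
 */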
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
			       u16 info)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
		       DRV_NAME, sid, tid, info);
	if (info == RIONET_DOORBELL_JOIN) {
		if (!nets[netid].active[sid]) {
			spin_lock(&nets[netid].lock);
			list_for_each_entry(peer, &nets[netid].peers, node) {
				if (peer->rdev->destid == sid) {
					nets[netid].active[sid] = peer->rdev;
					nets[netid].nact++;
				}
			}
			spin_unlock(&nets[netid].lock);

			rio_mport_send_doorbell(mport, sid,
						RIONET_DOORBELL_JOIN);
		}
	} else if (info == RIONET_DOORBELL_LEAVE) {
		spin_lock(&nets[netid].lock);
		if (nets[netid].active[sid]) {
			nets[netid].active[sid] = NULL;
			nets[netid].nact--;
		}
		spin_unlock(&nets[netid].lock);
	} else {
		if (netif_msg_intr(rnet))
			printk(KERN_WARNING "%s: unhandled doorbell\n",
			       DRV_NAME);
	}
}

static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	int n;
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	spin_lock(&rnet->lock);
	if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
		rionet_rx_fill(ndev, n);
	spin_unlock(&rnet->lock);
}

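/*
 * Outbound message interrupt: free skbs for every slot the hardware has
 * completed and wake the queue if ring space became available.
 */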
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
	struct net_device *ndev = dev_id;
	struct rionet_private *rnet = netdev_priv(ndev);

	spin_lock(&rnet->tx_lock);

	if (netif_msg_intr(rnet))
		printk(KERN_INFO
		       "%s: outbound message event, mbox %d slot %d\n",
		       DRV_NAME, mbox, slot);

	while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
		/* dma unmap single */
		dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
		rnet->tx_skb[rnet->ack_slot] = NULL;
		++rnet->ack_slot;
		rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
		rnet->tx_cnt--;
	}

	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
		netif_wake_queue(ndev);

	spin_unlock(&rnet->tx_lock);
}

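/*
 * ndo_open: claim the doorbell range and both mailboxes, prime the RX
 * ring, then send a JOIN doorbell to every known peer.
 */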
static int rionet_open(struct net_device *ndev)
{
	int i, rc = 0;
	struct rionet_peer *peer;
	struct rionet_private *rnet = netdev_priv(ndev);
	unsigned char netid = rnet->mport->id;
	unsigned long flags;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: open\n", DRV_NAME);

	if ((rc = rio_request_inb_dbell(rnet->mport,
					(void *)ndev,
					RIONET_DOORBELL_JOIN,
					RIONET_DOORBELL_LEAVE,
					rionet_dbell_event)) < 0)
		goto out;

	if ((rc = rio_request_inb_mbox(rnet->mport,
				       (void *)ndev,
				       RIONET_MAILBOX,
				       RIONET_RX_RING_SIZE,
				       rionet_inb_msg_event)) < 0)
		goto out;

	if ((rc = rio_request_outb_mbox(rnet->mport,
					(void *)ndev,
					RIONET_MAILBOX,
					RIONET_TX_RING_SIZE,
					rionet_outb_msg_event)) < 0)
		goto out;

	/* Initialize inbound message ring */
	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		rnet->rx_skb[i] = NULL;
	rnet->rx_slot = 0;
	rionet_rx_fill(ndev, 0);

	rnet->tx_slot = 0;
	rnet->tx_cnt = 0;
	rnet->ack_slot = 0;

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		/* Send a join message */
		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);
	rnet->open = true;

out:
	return rc;
}

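/*
 * ndo_stop: tell active peers that we are leaving, drop their outbound
 * doorbell resources and release the mailboxes and the inbound doorbell
 * range.
 */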
static int rionet_close(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);
	struct rionet_peer *peer;
	unsigned char netid = rnet->mport->id;
	unsigned long flags;
	int i;

	if (netif_msg_ifup(rnet))
		printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	rnet->open = false;

	for (i = 0; i < RIONET_RX_RING_SIZE; i++)
		kfree_skb(rnet->rx_skb[i]);

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (nets[netid].active[peer->rdev->destid]) {
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
			nets[netid].active[peer->rdev->destid] = NULL;
		}
		if (peer->res)
			rio_release_outb_dbell(peer->rdev, peer->res);
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
			      RIONET_DOORBELL_LEAVE);
	rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
	rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

	return 0;
}

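/*
 * Called when a remote RapidIO device disappears from the bus: unlink its
 * peer entry, send a LEAVE doorbell if the device is still reachable and
 * free the peer's doorbell resource.
 */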
static void rionet_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;
	struct rionet_peer *peer;
	int state, found = 0;
	unsigned long flags;

	if (!dev_rionet_capable(rdev))
		return;

	spin_lock_irqsave(&nets[netid].lock, flags);
	list_for_each_entry(peer, &nets[netid].peers, node) {
		if (peer->rdev == rdev) {
			list_del(&peer->node);
			if (nets[netid].active[rdev->destid]) {
				state = atomic_read(&rdev->state);
				if (state != RIO_DEVICE_GONE &&
				    state != RIO_DEVICE_INITIALIZING)
					rio_send_doorbell(rdev,
						RIONET_DOORBELL_LEAVE);
				nets[netid].active[rdev->destid] = NULL;
				nets[netid].nact--;
			}
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&nets[netid].lock, flags);

	if (found) {
		if (peer->res)
			rio_release_outb_dbell(rdev, peer->res);
		kfree(peer);
	}
}

static void rionet_get_drvinfo(struct net_device *ndev,
			       struct ethtool_drvinfo *info)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));
	strscpy(info->fw_version, "n/a", sizeof(info->fw_version));
	strscpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
	.get_drvinfo = rionet_get_drvinfo,
	.get_msglevel = rionet_get_msglevel,
	.set_msglevel = rionet_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
	.ndo_open		= rionet_open,
	.ndo_stop		= rionet_close,
	.ndo_start_xmit		= rionet_start_xmit,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

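/*
 * One-time per-mport setup: allocate the active-peer table and register
 * the net_device.  The MAC address is synthesized from the local
 * destination ID as 00:01:00:01:<id_hi>:<id_lo>, e.g. destid 0x0005
 * becomes 00:01:00:01:00:05, matching RIONET_GET_DESTID() used in the
 * transmit path.
 */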
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
	int rc = 0;
	struct rionet_private *rnet;
	u16 device_id;
	u8 addr[ETH_ALEN];
	const size_t rionet_active_bytes = sizeof(void *) *
				RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

	nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
						get_order(rionet_active_bytes));
	if (!nets[mport->id].active) {
		rc = -ENOMEM;
		goto out;
	}
	memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

	/* Set up private area */
	rnet = netdev_priv(ndev);
	rnet->mport = mport;
	rnet->open = false;

	/* Set the default MAC address */
	device_id = rio_local_get_device_id(mport);
	addr[0] = 0x00;
	addr[1] = 0x01;
	addr[2] = 0x00;
	addr[3] = 0x01;
	addr[4] = device_id >> 8;
	addr[5] = device_id & 0xff;
	eth_hw_addr_set(ndev, addr);

	ndev->netdev_ops = &rionet_netdev_ops;
	ndev->mtu = RIONET_MAX_MTU;
	/* MTU range: 68 - 4082 */
	ndev->min_mtu = ETH_MIN_MTU;
	ndev->max_mtu = RIONET_MAX_MTU;
	SET_NETDEV_DEV(ndev, &mport->dev);
	ndev->ethtool_ops = &rionet_ethtool_ops;

	spin_lock_init(&rnet->lock);
	spin_lock_init(&rnet->tx_lock);

	rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

	rc = register_netdev(ndev);
	if (rc != 0) {
		free_pages((unsigned long)nets[mport->id].active,
			   get_order(rionet_active_bytes));
		goto out;
	}

	printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
	       ndev->name, DRV_NAME, DRV_DESC, DRV_VERSION,
	       ndev->dev_addr, mport->name);

out:
	return rc;
}

static int rionet_add_dev(struct device *dev, struct subsys_interface *sif)
{
	int rc = -ENODEV;
	u32 lsrc_ops, ldst_ops;
	struct rionet_peer *peer;
	struct net_device *ndev = NULL;
	struct rio_dev *rdev = to_rio_dev(dev);
	unsigned char netid = rdev->net->hport->id;

	if (netid >= RIONET_MAX_NETS)
		return rc;

	/*
	 * If first time through this net, make sure local device is rionet
	 * capable and setup netdev (this step will be skipped in later probes
	 * on the same net).
	 */
	if (!nets[netid].ndev) {
		rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
					 &lsrc_ops);
		rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
					 &ldst_ops);
		if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
			printk(KERN_ERR
			       "%s: local device %s is not network capable\n",
			       DRV_NAME, rdev->net->hport->name);
			goto out;
		}

		/* Allocate our net_device structure */
		ndev = alloc_etherdev(sizeof(struct rionet_private));
		if (ndev == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		rc = rionet_setup_netdev(rdev->net->hport, ndev);
		if (rc) {
			printk(KERN_ERR "%s: failed to setup netdev (rc=%d)\n",
			       DRV_NAME, rc);
			free_netdev(ndev);
			goto out;
		}

		INIT_LIST_HEAD(&nets[netid].peers);
		spin_lock_init(&nets[netid].lock);
		nets[netid].nact = 0;
		nets[netid].ndev = ndev;
	}

	/*
	 * If the remote device has mailbox/doorbell capabilities,
	 * add it to the peer list.
	 */
	if (dev_rionet_capable(rdev)) {
		struct rionet_private *rnet;
		unsigned long flags;

		rnet = netdev_priv(nets[netid].ndev);

		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
		if (!peer) {
			rc = -ENOMEM;
			goto out;
		}
		peer->rdev = rdev;
		peer->res = rio_request_outb_dbell(peer->rdev,
						   RIONET_DOORBELL_JOIN,
						   RIONET_DOORBELL_LEAVE);
		if (!peer->res) {
			pr_err("%s: error requesting doorbells\n", DRV_NAME);
			kfree(peer);
			rc = -ENOMEM;
			goto out;
		}

		spin_lock_irqsave(&nets[netid].lock, flags);
		list_add_tail(&peer->node, &nets[netid].peers);
		spin_unlock_irqrestore(&nets[netid].lock, flags);
		pr_debug("%s: %s add peer %s\n",
			 DRV_NAME, __func__, rio_name(rdev));

		/* If netdev is already opened, send join request to new peer */
		if (rnet->open)
			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
	}

	return 0;
out:
	return rc;
}

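/*
 * Reboot notifier: send LEAVE doorbells so peers drop this node from
 * their active tables before the system goes down.
 */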
static int rionet_shutdown(struct notifier_block *nb, unsigned long code,
			   void *unused)
{
	struct rionet_peer *peer;
	unsigned long flags;
	int i;

	pr_debug("%s: %s\n", DRV_NAME, __func__);

	for (i = 0; i < RIONET_MAX_NETS; i++) {
		if (!nets[i].ndev)
			continue;

		spin_lock_irqsave(&nets[i].lock, flags);
		list_for_each_entry(peer, &nets[i].peers, node) {
			if (nets[i].active[peer->rdev->destid]) {
				rio_send_doorbell(peer->rdev,
						  RIONET_DOORBELL_LEAVE);
				nets[i].active[peer->rdev->destid] = NULL;
			}
		}
		spin_unlock_irqrestore(&nets[i].lock, flags);
	}

	return NOTIFY_DONE;
}

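/*
 * Called when a local mport goes away: unregister its net_device and free
 * the active-peer table for that net.
 */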
static void rionet_remove_mport(struct device *dev)
{
	struct rio_mport *mport = to_rio_mport(dev);
	struct net_device *ndev;
	int id = mport->id;

	pr_debug("%s %s\n", __func__, mport->name);

	WARN(nets[id].nact, "%s called when connected to %d peers\n",
	     __func__, nets[id].nact);
	WARN(!nets[id].ndev, "%s called for mport without NDEV\n",
	     __func__);

	if (nets[id].ndev) {
		ndev = nets[id].ndev;
		netif_stop_queue(ndev);
		unregister_netdev(ndev);

		free_pages((unsigned long)nets[id].active,
			   get_order(sizeof(void *) *
			   RIO_MAX_ROUTE_ENTRIES(mport->sys_size)));
		nets[id].active = NULL;
		free_netdev(ndev);
		nets[id].ndev = NULL;
	}
}

static struct rio_device_id rionet_id_table[] = {
	{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
	{ 0, }	/* terminate list */
};

MODULE_DEVICE_TABLE(rapidio, rionet_id_table);

static struct subsys_interface rionet_interface = {
	.name		= "rionet",
	.subsys		= &rio_bus_type,
	.add_dev	= rionet_add_dev,
	.remove_dev	= rionet_remove_dev,
};

static struct notifier_block rionet_notifier = {
	.notifier_call = rionet_shutdown,
};

/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class = &rio_mport_class,
	.add_dev = NULL,
	.remove_dev = rionet_remove_mport,
};

static int __init rionet_init(void)
{
	int ret;

	ret = register_reboot_notifier(&rionet_notifier);
	if (ret) {
		pr_err("%s: failed to register reboot notifier (err=%d)\n",
		       DRV_NAME, ret);
		return ret;
	}

	ret = class_interface_register(&rio_mport_interface);
	if (ret) {
		pr_err("%s: class_interface_register error: %d\n",
		       DRV_NAME, ret);
		return ret;
	}

	return subsys_interface_register(&rionet_interface);
}

static void __exit rionet_exit(void)
{
	unregister_reboot_notifier(&rionet_notifier);
	subsys_interface_unregister(&rionet_interface);
	class_interface_unregister(&rio_mport_interface);
}

late_initcall(rionet_init);
module_exit(rionet_exit);