/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/delay.h>
17 #include <linux/rio.h>
18 #include <linux/rio_drv.h>
19 #include <linux/slab.h>
20 #include <linux/rio_ids.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/crc32.h>
26 #include <linux/ethtool.h>
27 #include <linux/reboot.h>
29 #define DRV_NAME "rionet"
30 #define DRV_VERSION "0.3"
31 #define DRV_AUTHOR "Matt Porter <mporter@kernel.crashing.org>"
32 #define DRV_DESC "Ethernet over RapidIO"
34 MODULE_AUTHOR(DRV_AUTHOR
);
35 MODULE_DESCRIPTION(DRV_DESC
);
36 MODULE_LICENSE("GPL");
38 #define RIONET_DEFAULT_MSGLEVEL \
44 #define RIONET_DOORBELL_JOIN 0x1000
45 #define RIONET_DOORBELL_LEAVE 0x1001
47 #define RIONET_MAILBOX 0
49 #define RIONET_TX_RING_SIZE CONFIG_RIONET_TX_SIZE
50 #define RIONET_RX_RING_SIZE CONFIG_RIONET_RX_SIZE
51 #define RIONET_MAX_NETS 8
52 #define RIONET_MSG_SIZE RIO_MAX_MSG_SIZE
53 #define RIONET_MAX_MTU (RIONET_MSG_SIZE - ETH_HLEN)
55 struct rionet_private
{
56 struct rio_mport
*mport
;
57 struct sk_buff
*rx_skb
[RIONET_RX_RING_SIZE
];
58 struct sk_buff
*tx_skb
[RIONET_TX_RING_SIZE
];
70 struct list_head node
;
76 struct net_device
*ndev
;
77 struct list_head peers
;
78 spinlock_t lock
; /* net info access lock */
79 struct rio_dev
**active
;
80 int nact
; /* number of active peers */
83 static struct rionet_net nets
[RIONET_MAX_NETS
];
/*
 * A device pair can run rionet if both data messaging and doorbells are
 * supported on the source and destination. Arguments are parenthesized to
 * avoid precedence surprises when callers pass expressions.
 */
#define is_rionet_capable(src_ops, dst_ops)			\
			(((src_ops) & RIO_SRC_OPS_DATA_MSG) &&	\
			 ((dst_ops) & RIO_DST_OPS_DATA_MSG) &&	\
			 ((src_ops) & RIO_SRC_OPS_DOORBELL) &&	\
			 ((dst_ops) & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
	is_rionet_capable((dev)->src_ops, (dev)->dst_ops)

/* rionet MACs are 00:01:00:01:<destid_hi>:<destid_lo>. */
#define RIONET_MAC_MATCH(x)	(!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)	((*((u8 *)(x) + 4) << 8) | *((u8 *)(x) + 5))
96 static int rionet_rx_clean(struct net_device
*ndev
)
100 struct rionet_private
*rnet
= netdev_priv(ndev
);
106 if (!rnet
->rx_skb
[i
])
109 if (!(data
= rio_get_inb_message(rnet
->mport
, RIONET_MAILBOX
)))
112 rnet
->rx_skb
[i
]->data
= data
;
113 skb_put(rnet
->rx_skb
[i
], RIO_MAX_MSG_SIZE
);
114 rnet
->rx_skb
[i
]->protocol
=
115 eth_type_trans(rnet
->rx_skb
[i
], ndev
);
116 error
= netif_rx(rnet
->rx_skb
[i
]);
118 if (error
== NET_RX_DROP
) {
119 ndev
->stats
.rx_dropped
++;
121 ndev
->stats
.rx_packets
++;
122 ndev
->stats
.rx_bytes
+= RIO_MAX_MSG_SIZE
;
125 } while ((i
= (i
+ 1) % RIONET_RX_RING_SIZE
) != rnet
->rx_slot
);
130 static void rionet_rx_fill(struct net_device
*ndev
, int end
)
133 struct rionet_private
*rnet
= netdev_priv(ndev
);
137 rnet
->rx_skb
[i
] = dev_alloc_skb(RIO_MAX_MSG_SIZE
);
139 if (!rnet
->rx_skb
[i
])
142 rio_add_inb_buffer(rnet
->mport
, RIONET_MAILBOX
,
143 rnet
->rx_skb
[i
]->data
);
144 } while ((i
= (i
+ 1) % RIONET_RX_RING_SIZE
) != end
);
149 static int rionet_queue_tx_msg(struct sk_buff
*skb
, struct net_device
*ndev
,
150 struct rio_dev
*rdev
)
152 struct rionet_private
*rnet
= netdev_priv(ndev
);
154 rio_add_outb_message(rnet
->mport
, rdev
, 0, skb
->data
, skb
->len
);
155 rnet
->tx_skb
[rnet
->tx_slot
] = skb
;
157 ndev
->stats
.tx_packets
++;
158 ndev
->stats
.tx_bytes
+= skb
->len
;
160 if (++rnet
->tx_cnt
== RIONET_TX_RING_SIZE
)
161 netif_stop_queue(ndev
);
164 rnet
->tx_slot
&= (RIONET_TX_RING_SIZE
- 1);
166 if (netif_msg_tx_queued(rnet
))
167 printk(KERN_INFO
"%s: queued skb len %8.8x\n", DRV_NAME
,
173 static int rionet_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
176 struct rionet_private
*rnet
= netdev_priv(ndev
);
177 struct ethhdr
*eth
= (struct ethhdr
*)skb
->data
;
182 local_irq_save(flags
);
183 if (!spin_trylock(&rnet
->tx_lock
)) {
184 local_irq_restore(flags
);
185 return NETDEV_TX_LOCKED
;
188 if (is_multicast_ether_addr(eth
->h_dest
))
189 add_num
= nets
[rnet
->mport
->id
].nact
;
191 if ((rnet
->tx_cnt
+ add_num
) > RIONET_TX_RING_SIZE
) {
192 netif_stop_queue(ndev
);
193 spin_unlock_irqrestore(&rnet
->tx_lock
, flags
);
194 printk(KERN_ERR
"%s: BUG! Tx Ring full when queue awake!\n",
196 return NETDEV_TX_BUSY
;
199 if (is_multicast_ether_addr(eth
->h_dest
)) {
202 for (i
= 0; i
< RIO_MAX_ROUTE_ENTRIES(rnet
->mport
->sys_size
);
204 if (nets
[rnet
->mport
->id
].active
[i
]) {
205 rionet_queue_tx_msg(skb
, ndev
,
206 nets
[rnet
->mport
->id
].active
[i
]);
208 atomic_inc(&skb
->users
);
211 } else if (RIONET_MAC_MATCH(eth
->h_dest
)) {
212 destid
= RIONET_GET_DESTID(eth
->h_dest
);
213 if (nets
[rnet
->mport
->id
].active
[destid
])
214 rionet_queue_tx_msg(skb
, ndev
,
215 nets
[rnet
->mport
->id
].active
[destid
]);
218 * If the target device was removed from the list of
219 * active peers but we still have TX packets targeting
220 * it just report sending a packet to the target
221 * (without actual packet transfer).
223 dev_kfree_skb_any(skb
);
224 ndev
->stats
.tx_packets
++;
225 ndev
->stats
.tx_bytes
+= skb
->len
;
229 spin_unlock_irqrestore(&rnet
->tx_lock
, flags
);
234 static void rionet_dbell_event(struct rio_mport
*mport
, void *dev_id
, u16 sid
, u16 tid
,
237 struct net_device
*ndev
= dev_id
;
238 struct rionet_private
*rnet
= netdev_priv(ndev
);
239 struct rionet_peer
*peer
;
240 unsigned char netid
= rnet
->mport
->id
;
242 if (netif_msg_intr(rnet
))
243 printk(KERN_INFO
"%s: doorbell sid %4.4x tid %4.4x info %4.4x",
244 DRV_NAME
, sid
, tid
, info
);
245 if (info
== RIONET_DOORBELL_JOIN
) {
246 if (!nets
[netid
].active
[sid
]) {
247 spin_lock(&nets
[netid
].lock
);
248 list_for_each_entry(peer
, &nets
[netid
].peers
, node
) {
249 if (peer
->rdev
->destid
== sid
) {
250 nets
[netid
].active
[sid
] = peer
->rdev
;
254 spin_unlock(&nets
[netid
].lock
);
256 rio_mport_send_doorbell(mport
, sid
,
257 RIONET_DOORBELL_JOIN
);
259 } else if (info
== RIONET_DOORBELL_LEAVE
) {
260 spin_lock(&nets
[netid
].lock
);
261 if (nets
[netid
].active
[sid
]) {
262 nets
[netid
].active
[sid
] = NULL
;
265 spin_unlock(&nets
[netid
].lock
);
267 if (netif_msg_intr(rnet
))
268 printk(KERN_WARNING
"%s: unhandled doorbell\n",
273 static void rionet_inb_msg_event(struct rio_mport
*mport
, void *dev_id
, int mbox
, int slot
)
276 struct net_device
*ndev
= dev_id
;
277 struct rionet_private
*rnet
= netdev_priv(ndev
);
279 if (netif_msg_intr(rnet
))
280 printk(KERN_INFO
"%s: inbound message event, mbox %d slot %d\n",
281 DRV_NAME
, mbox
, slot
);
283 spin_lock(&rnet
->lock
);
284 if ((n
= rionet_rx_clean(ndev
)) != rnet
->rx_slot
)
285 rionet_rx_fill(ndev
, n
);
286 spin_unlock(&rnet
->lock
);
289 static void rionet_outb_msg_event(struct rio_mport
*mport
, void *dev_id
, int mbox
, int slot
)
291 struct net_device
*ndev
= dev_id
;
292 struct rionet_private
*rnet
= netdev_priv(ndev
);
294 spin_lock(&rnet
->tx_lock
);
296 if (netif_msg_intr(rnet
))
298 "%s: outbound message event, mbox %d slot %d\n",
299 DRV_NAME
, mbox
, slot
);
301 while (rnet
->tx_cnt
&& (rnet
->ack_slot
!= slot
)) {
302 /* dma unmap single */
303 dev_kfree_skb_irq(rnet
->tx_skb
[rnet
->ack_slot
]);
304 rnet
->tx_skb
[rnet
->ack_slot
] = NULL
;
306 rnet
->ack_slot
&= (RIONET_TX_RING_SIZE
- 1);
310 if (rnet
->tx_cnt
< RIONET_TX_RING_SIZE
)
311 netif_wake_queue(ndev
);
313 spin_unlock(&rnet
->tx_lock
);
316 static int rionet_open(struct net_device
*ndev
)
319 struct rionet_peer
*peer
;
320 struct rionet_private
*rnet
= netdev_priv(ndev
);
321 unsigned char netid
= rnet
->mport
->id
;
324 if (netif_msg_ifup(rnet
))
325 printk(KERN_INFO
"%s: open\n", DRV_NAME
);
327 if ((rc
= rio_request_inb_dbell(rnet
->mport
,
329 RIONET_DOORBELL_JOIN
,
330 RIONET_DOORBELL_LEAVE
,
331 rionet_dbell_event
)) < 0)
334 if ((rc
= rio_request_inb_mbox(rnet
->mport
,
338 rionet_inb_msg_event
)) < 0)
341 if ((rc
= rio_request_outb_mbox(rnet
->mport
,
345 rionet_outb_msg_event
)) < 0)
348 /* Initialize inbound message ring */
349 for (i
= 0; i
< RIONET_RX_RING_SIZE
; i
++)
350 rnet
->rx_skb
[i
] = NULL
;
352 rionet_rx_fill(ndev
, 0);
358 netif_carrier_on(ndev
);
359 netif_start_queue(ndev
);
361 spin_lock_irqsave(&nets
[netid
].lock
, flags
);
362 list_for_each_entry(peer
, &nets
[netid
].peers
, node
) {
363 /* Send a join message */
364 rio_send_doorbell(peer
->rdev
, RIONET_DOORBELL_JOIN
);
366 spin_unlock_irqrestore(&nets
[netid
].lock
, flags
);
373 static int rionet_close(struct net_device
*ndev
)
375 struct rionet_private
*rnet
= netdev_priv(ndev
);
376 struct rionet_peer
*peer
;
377 unsigned char netid
= rnet
->mport
->id
;
381 if (netif_msg_ifup(rnet
))
382 printk(KERN_INFO
"%s: close %s\n", DRV_NAME
, ndev
->name
);
384 netif_stop_queue(ndev
);
385 netif_carrier_off(ndev
);
388 for (i
= 0; i
< RIONET_RX_RING_SIZE
; i
++)
389 kfree_skb(rnet
->rx_skb
[i
]);
391 spin_lock_irqsave(&nets
[netid
].lock
, flags
);
392 list_for_each_entry(peer
, &nets
[netid
].peers
, node
) {
393 if (nets
[netid
].active
[peer
->rdev
->destid
]) {
394 rio_send_doorbell(peer
->rdev
, RIONET_DOORBELL_LEAVE
);
395 nets
[netid
].active
[peer
->rdev
->destid
] = NULL
;
398 rio_release_outb_dbell(peer
->rdev
, peer
->res
);
400 spin_unlock_irqrestore(&nets
[netid
].lock
, flags
);
402 rio_release_inb_dbell(rnet
->mport
, RIONET_DOORBELL_JOIN
,
403 RIONET_DOORBELL_LEAVE
);
404 rio_release_inb_mbox(rnet
->mport
, RIONET_MAILBOX
);
405 rio_release_outb_mbox(rnet
->mport
, RIONET_MAILBOX
);
410 static void rionet_remove_dev(struct device
*dev
, struct subsys_interface
*sif
)
412 struct rio_dev
*rdev
= to_rio_dev(dev
);
413 unsigned char netid
= rdev
->net
->hport
->id
;
414 struct rionet_peer
*peer
;
415 int state
, found
= 0;
418 if (!dev_rionet_capable(rdev
))
421 spin_lock_irqsave(&nets
[netid
].lock
, flags
);
422 list_for_each_entry(peer
, &nets
[netid
].peers
, node
) {
423 if (peer
->rdev
== rdev
) {
424 list_del(&peer
->node
);
425 if (nets
[netid
].active
[rdev
->destid
]) {
426 state
= atomic_read(&rdev
->state
);
427 if (state
!= RIO_DEVICE_GONE
&&
428 state
!= RIO_DEVICE_INITIALIZING
) {
429 rio_send_doorbell(rdev
,
430 RIONET_DOORBELL_LEAVE
);
432 nets
[netid
].active
[rdev
->destid
] = NULL
;
439 spin_unlock_irqrestore(&nets
[netid
].lock
, flags
);
443 rio_release_outb_dbell(rdev
, peer
->res
);
448 static void rionet_get_drvinfo(struct net_device
*ndev
,
449 struct ethtool_drvinfo
*info
)
451 struct rionet_private
*rnet
= netdev_priv(ndev
);
453 strlcpy(info
->driver
, DRV_NAME
, sizeof(info
->driver
));
454 strlcpy(info
->version
, DRV_VERSION
, sizeof(info
->version
));
455 strlcpy(info
->fw_version
, "n/a", sizeof(info
->fw_version
));
456 strlcpy(info
->bus_info
, rnet
->mport
->name
, sizeof(info
->bus_info
));
459 static u32
rionet_get_msglevel(struct net_device
*ndev
)
461 struct rionet_private
*rnet
= netdev_priv(ndev
);
463 return rnet
->msg_enable
;
466 static void rionet_set_msglevel(struct net_device
*ndev
, u32 value
)
468 struct rionet_private
*rnet
= netdev_priv(ndev
);
470 rnet
->msg_enable
= value
;
473 static int rionet_change_mtu(struct net_device
*ndev
, int new_mtu
)
475 if ((new_mtu
< 68) || (new_mtu
> RIONET_MAX_MTU
)) {
476 printk(KERN_ERR
"%s: Invalid MTU size %d\n",
477 ndev
->name
, new_mtu
);
484 static const struct ethtool_ops rionet_ethtool_ops
= {
485 .get_drvinfo
= rionet_get_drvinfo
,
486 .get_msglevel
= rionet_get_msglevel
,
487 .set_msglevel
= rionet_set_msglevel
,
488 .get_link
= ethtool_op_get_link
,
491 static const struct net_device_ops rionet_netdev_ops
= {
492 .ndo_open
= rionet_open
,
493 .ndo_stop
= rionet_close
,
494 .ndo_start_xmit
= rionet_start_xmit
,
495 .ndo_change_mtu
= rionet_change_mtu
,
496 .ndo_validate_addr
= eth_validate_addr
,
497 .ndo_set_mac_address
= eth_mac_addr
,
500 static int rionet_setup_netdev(struct rio_mport
*mport
, struct net_device
*ndev
)
503 struct rionet_private
*rnet
;
505 const size_t rionet_active_bytes
= sizeof(void *) *
506 RIO_MAX_ROUTE_ENTRIES(mport
->sys_size
);
508 nets
[mport
->id
].active
= (struct rio_dev
**)__get_free_pages(GFP_KERNEL
,
509 get_order(rionet_active_bytes
));
510 if (!nets
[mport
->id
].active
) {
514 memset((void *)nets
[mport
->id
].active
, 0, rionet_active_bytes
);
516 /* Set up private area */
517 rnet
= netdev_priv(ndev
);
521 /* Set the default MAC address */
522 device_id
= rio_local_get_device_id(mport
);
523 ndev
->dev_addr
[0] = 0x00;
524 ndev
->dev_addr
[1] = 0x01;
525 ndev
->dev_addr
[2] = 0x00;
526 ndev
->dev_addr
[3] = 0x01;
527 ndev
->dev_addr
[4] = device_id
>> 8;
528 ndev
->dev_addr
[5] = device_id
& 0xff;
530 ndev
->netdev_ops
= &rionet_netdev_ops
;
531 ndev
->mtu
= RIONET_MAX_MTU
;
532 ndev
->features
= NETIF_F_LLTX
;
533 SET_NETDEV_DEV(ndev
, &mport
->dev
);
534 ndev
->ethtool_ops
= &rionet_ethtool_ops
;
536 spin_lock_init(&rnet
->lock
);
537 spin_lock_init(&rnet
->tx_lock
);
539 rnet
->msg_enable
= RIONET_DEFAULT_MSGLEVEL
;
541 rc
= register_netdev(ndev
);
543 free_pages((unsigned long)nets
[mport
->id
].active
,
544 get_order(rionet_active_bytes
));
548 printk(KERN_INFO
"%s: %s %s Version %s, MAC %pM, %s\n",
560 static int rionet_add_dev(struct device
*dev
, struct subsys_interface
*sif
)
563 u32 lsrc_ops
, ldst_ops
;
564 struct rionet_peer
*peer
;
565 struct net_device
*ndev
= NULL
;
566 struct rio_dev
*rdev
= to_rio_dev(dev
);
567 unsigned char netid
= rdev
->net
->hport
->id
;
569 if (netid
>= RIONET_MAX_NETS
)
573 * If first time through this net, make sure local device is rionet
574 * capable and setup netdev (this step will be skipped in later probes
577 if (!nets
[netid
].ndev
) {
578 rio_local_read_config_32(rdev
->net
->hport
, RIO_SRC_OPS_CAR
,
580 rio_local_read_config_32(rdev
->net
->hport
, RIO_DST_OPS_CAR
,
582 if (!is_rionet_capable(lsrc_ops
, ldst_ops
)) {
584 "%s: local device %s is not network capable\n",
585 DRV_NAME
, rdev
->net
->hport
->name
);
589 /* Allocate our net_device structure */
590 ndev
= alloc_etherdev(sizeof(struct rionet_private
));
596 rc
= rionet_setup_netdev(rdev
->net
->hport
, ndev
);
598 printk(KERN_ERR
"%s: failed to setup netdev (rc=%d)\n",
604 INIT_LIST_HEAD(&nets
[netid
].peers
);
605 spin_lock_init(&nets
[netid
].lock
);
606 nets
[netid
].nact
= 0;
607 nets
[netid
].ndev
= ndev
;
611 * If the remote device has mailbox/doorbell capabilities,
612 * add it to the peer list.
614 if (dev_rionet_capable(rdev
)) {
615 struct rionet_private
*rnet
;
618 rnet
= netdev_priv(nets
[netid
].ndev
);
620 peer
= kzalloc(sizeof(*peer
), GFP_KERNEL
);
626 peer
->res
= rio_request_outb_dbell(peer
->rdev
,
627 RIONET_DOORBELL_JOIN
,
628 RIONET_DOORBELL_LEAVE
);
630 pr_err("%s: error requesting doorbells\n", DRV_NAME
);
636 spin_lock_irqsave(&nets
[netid
].lock
, flags
);
637 list_add_tail(&peer
->node
, &nets
[netid
].peers
);
638 spin_unlock_irqrestore(&nets
[netid
].lock
, flags
);
639 pr_debug("%s: %s add peer %s\n",
640 DRV_NAME
, __func__
, rio_name(rdev
));
642 /* If netdev is already opened, send join request to new peer */
644 rio_send_doorbell(peer
->rdev
, RIONET_DOORBELL_JOIN
);
652 static int rionet_shutdown(struct notifier_block
*nb
, unsigned long code
,
655 struct rionet_peer
*peer
;
659 pr_debug("%s: %s\n", DRV_NAME
, __func__
);
661 for (i
= 0; i
< RIONET_MAX_NETS
; i
++) {
665 spin_lock_irqsave(&nets
[i
].lock
, flags
);
666 list_for_each_entry(peer
, &nets
[i
].peers
, node
) {
667 if (nets
[i
].active
[peer
->rdev
->destid
]) {
668 rio_send_doorbell(peer
->rdev
,
669 RIONET_DOORBELL_LEAVE
);
670 nets
[i
].active
[peer
->rdev
->destid
] = NULL
;
673 spin_unlock_irqrestore(&nets
[i
].lock
, flags
);
679 static void rionet_remove_mport(struct device
*dev
,
680 struct class_interface
*class_intf
)
682 struct rio_mport
*mport
= to_rio_mport(dev
);
683 struct net_device
*ndev
;
686 pr_debug("%s %s\n", __func__
, mport
->name
);
688 WARN(nets
[id
].nact
, "%s called when connected to %d peers\n",
689 __func__
, nets
[id
].nact
);
690 WARN(!nets
[id
].ndev
, "%s called for mport without NDEV\n",
694 ndev
= nets
[id
].ndev
;
695 netif_stop_queue(ndev
);
696 unregister_netdev(ndev
);
698 free_pages((unsigned long)nets
[id
].active
,
699 get_order(sizeof(void *) *
700 RIO_MAX_ROUTE_ENTRIES(mport
->sys_size
)));
701 nets
[id
].active
= NULL
;
703 nets
[id
].ndev
= NULL
;
708 static struct rio_device_id rionet_id_table
[] = {
709 {RIO_DEVICE(RIO_ANY_ID
, RIO_ANY_ID
)},
710 { 0, } /* terminate list */
713 MODULE_DEVICE_TABLE(rapidio
, rionet_id_table
);
716 static struct subsys_interface rionet_interface
= {
718 .subsys
= &rio_bus_type
,
719 .add_dev
= rionet_add_dev
,
720 .remove_dev
= rionet_remove_dev
,
723 static struct notifier_block rionet_notifier
= {
724 .notifier_call
= rionet_shutdown
,
727 /* the rio_mport_interface is used to handle local mport devices */
728 static struct class_interface rio_mport_interface __refdata
= {
729 .class = &rio_mport_class
,
731 .remove_dev
= rionet_remove_mport
,
734 static int __init
rionet_init(void)
738 ret
= register_reboot_notifier(&rionet_notifier
);
740 pr_err("%s: failed to register reboot notifier (err=%d)\n",
745 ret
= class_interface_register(&rio_mport_interface
);
747 pr_err("%s: class_interface_register error: %d\n",
752 return subsys_interface_register(&rionet_interface
);
755 static void __exit
rionet_exit(void)
757 unregister_reboot_notifier(&rionet_notifier
);
758 subsys_interface_unregister(&rionet_interface
);
759 class_interface_unregister(&rio_mport_interface
);
762 late_initcall(rionet_init
);
763 module_exit(rionet_exit
);