/*
 * rionet - Ethernet driver over RapidIO messaging services
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/slab.h>
#include <linux/rio_ids.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>

#define DRV_NAME        "rionet"
#define DRV_VERSION     "0.3"
#define DRV_AUTHOR      "Matt Porter <mporter@kernel.crashing.org>"
#define DRV_DESC        "Ethernet over RapidIO"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");

#define RIONET_DEFAULT_MSGLEVEL \
                        (NETIF_MSG_DRV | \
                         NETIF_MSG_LINK | \
                         NETIF_MSG_RX_ERR | \
                         NETIF_MSG_TX_ERR)

#define RIONET_DOORBELL_JOIN    0x1000
#define RIONET_DOORBELL_LEAVE   0x1001

#define RIONET_MAILBOX          0

#define RIONET_TX_RING_SIZE     CONFIG_RIONET_TX_SIZE
#define RIONET_RX_RING_SIZE     CONFIG_RIONET_RX_SIZE
#define RIONET_MAX_NETS         8

struct rionet_private {
        struct rio_mport *mport;
        struct sk_buff *rx_skb[RIONET_RX_RING_SIZE];
        struct sk_buff *tx_skb[RIONET_TX_RING_SIZE];
        int rx_slot;
        int tx_slot;
        int tx_cnt;
        int ack_slot;
        spinlock_t lock;
        spinlock_t tx_lock;
        u32 msg_enable;
};

struct rionet_peer {
        struct list_head node;
        struct rio_dev *rdev;
        struct resource *res;
};

struct rionet_net {
        struct net_device *ndev;
        struct list_head peers;
        struct rio_dev **active;
        int nact;       /* number of active peers */
};

static struct rionet_net nets[RIONET_MAX_NETS];

#define is_rionet_capable(src_ops, dst_ops)             \
                        ((src_ops & RIO_SRC_OPS_DATA_MSG) && \
                         (dst_ops & RIO_DST_OPS_DATA_MSG) && \
                         (src_ops & RIO_SRC_OPS_DOORBELL) && \
                         (dst_ops & RIO_DST_OPS_DOORBELL))
#define dev_rionet_capable(dev) \
        is_rionet_capable(dev->src_ops, dev->dst_ops)

#define RIONET_MAC_MATCH(x)     (!memcmp((x), "\00\01\00\01", 4))
#define RIONET_GET_DESTID(x)    ((*((u8 *)x + 4) << 8) | *((u8 *)x + 5))

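/*
 * Drain the inbound message ring: attach each received buffer to the skb
 * posted for that slot, pass it up the network stack, and update RX
 * statistics.  Returns the slot index following the last cleaned entry.
 */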
static int rionet_rx_clean(struct net_device *ndev)
{
        int i;
        int error = 0;
        struct rionet_private *rnet = netdev_priv(ndev);
        void *data;

        i = rnet->rx_slot;

        do {
                if (!rnet->rx_skb[i])
                        continue;

                if (!(data = rio_get_inb_message(rnet->mport, RIONET_MAILBOX)))
                        break;

                rnet->rx_skb[i]->data = data;
                skb_put(rnet->rx_skb[i], RIO_MAX_MSG_SIZE);
                rnet->rx_skb[i]->protocol =
                    eth_type_trans(rnet->rx_skb[i], ndev);
                error = netif_rx(rnet->rx_skb[i]);

                if (error == NET_RX_DROP) {
                        ndev->stats.rx_dropped++;
                } else {
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += RIO_MAX_MSG_SIZE;
                }

        } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != rnet->rx_slot);

        return i;
}

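/*
 * Refill the inbound message ring with newly allocated skbs, posting each
 * data buffer to the mport's inbound mailbox, up to (but not including)
 * the 'end' slot.
 */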
static void rionet_rx_fill(struct net_device *ndev, int end)
{
        int i;
        struct rionet_private *rnet = netdev_priv(ndev);

        i = rnet->rx_slot;
        do {
                rnet->rx_skb[i] = dev_alloc_skb(RIO_MAX_MSG_SIZE);

                if (!rnet->rx_skb[i])
                        break;

                rio_add_inb_buffer(rnet->mport, RIONET_MAILBOX,
                                   rnet->rx_skb[i]->data);
        } while ((i = (i + 1) % RIONET_RX_RING_SIZE) != end);

        rnet->rx_slot = i;
}

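/*
 * Queue one outbound message to the given RapidIO device and park the skb
 * in the TX ring until the outbound-message event acknowledges it.
 */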
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
                               struct rio_dev *rdev)
{
        struct rionet_private *rnet = netdev_priv(ndev);

        rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
        rnet->tx_skb[rnet->tx_slot] = skb;

        ndev->stats.tx_packets++;
        ndev->stats.tx_bytes += skb->len;

        if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
                netif_stop_queue(ndev);

        ++rnet->tx_slot;
        rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

        if (netif_msg_tx_queued(rnet))
                printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
                       skb->len);

        return 0;
}

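/*
 * Hard-start transmit: multicast/broadcast frames are replicated to every
 * active peer; unicast frames carry the destination ID in the rionet MAC
 * address and go to that peer only.
 */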
static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        int i;
        struct rionet_private *rnet = netdev_priv(ndev);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
        u16 destid;
        unsigned long flags;
        int add_num = 1;

        local_irq_save(flags);
        if (!spin_trylock(&rnet->tx_lock)) {
                local_irq_restore(flags);
                return NETDEV_TX_LOCKED;
        }

        if (is_multicast_ether_addr(eth->h_dest))
                add_num = nets[rnet->mport->id].nact;

        if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
                netif_stop_queue(ndev);
                spin_unlock_irqrestore(&rnet->tx_lock, flags);
                printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
                       ndev->name);
                return NETDEV_TX_BUSY;
        }

        if (is_multicast_ether_addr(eth->h_dest)) {
                int count = 0;

                for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
                                i++)
                        if (nets[rnet->mport->id].active[i]) {
                                rionet_queue_tx_msg(skb, ndev,
                                        nets[rnet->mport->id].active[i]);
                                if (count)
                                        atomic_inc(&skb->users);
                                count++;
                        }
        } else if (RIONET_MAC_MATCH(eth->h_dest)) {
                destid = RIONET_GET_DESTID(eth->h_dest);
                if (nets[rnet->mport->id].active[destid])
                        rionet_queue_tx_msg(skb, ndev,
                                nets[rnet->mport->id].active[destid]);
        }

        spin_unlock_irqrestore(&rnet->tx_lock, flags);

        return NETDEV_TX_OK;
}

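/*
 * Inbound doorbell handler: a JOIN doorbell adds the sending peer to the
 * active table (and echoes a JOIN back), a LEAVE doorbell removes it.
 */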
static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u16 tid,
                               u16 info)
{
        struct net_device *ndev = dev_id;
        struct rionet_private *rnet = netdev_priv(ndev);
        struct rionet_peer *peer;

        if (netif_msg_intr(rnet))
                printk(KERN_INFO "%s: doorbell sid %4.4x tid %4.4x info %4.4x",
                       DRV_NAME, sid, tid, info);
        if (info == RIONET_DOORBELL_JOIN) {
                if (!nets[rnet->mport->id].active[sid]) {
                        list_for_each_entry(peer,
                                        &nets[rnet->mport->id].peers, node) {
                                if (peer->rdev->destid == sid) {
                                        nets[rnet->mport->id].active[sid] =
                                                                peer->rdev;
                                        nets[rnet->mport->id].nact++;
                                }
                        }
                        rio_mport_send_doorbell(mport, sid,
                                                RIONET_DOORBELL_JOIN);
                }
        } else if (info == RIONET_DOORBELL_LEAVE) {
                nets[rnet->mport->id].active[sid] = NULL;
                nets[rnet->mport->id].nact--;
        } else {
                if (netif_msg_intr(rnet))
                        printk(KERN_WARNING "%s: unhandled doorbell\n",
                               DRV_NAME);
        }
}

static void rionet_inb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
        int n;
        struct net_device *ndev = dev_id;
        struct rionet_private *rnet = netdev_priv(ndev);

        if (netif_msg_intr(rnet))
                printk(KERN_INFO "%s: inbound message event, mbox %d slot %d\n",
                       DRV_NAME, mbox, slot);

        spin_lock(&rnet->lock);
        if ((n = rionet_rx_clean(ndev)) != rnet->rx_slot)
                rionet_rx_fill(ndev, n);
        spin_unlock(&rnet->lock);
}

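/* Outbound message event: free acknowledged TX skbs and wake the queue. */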
static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbox, int slot)
{
        struct net_device *ndev = dev_id;
        struct rionet_private *rnet = netdev_priv(ndev);

        spin_lock(&rnet->lock);

        if (netif_msg_intr(rnet))
                printk(KERN_INFO
                       "%s: outbound message event, mbox %d slot %d\n",
                       DRV_NAME, mbox, slot);

        while (rnet->tx_cnt && (rnet->ack_slot != slot)) {
                /* dma unmap single */
                dev_kfree_skb_irq(rnet->tx_skb[rnet->ack_slot]);
                rnet->tx_skb[rnet->ack_slot] = NULL;
                ++rnet->ack_slot;
                rnet->ack_slot &= (RIONET_TX_RING_SIZE - 1);
                rnet->tx_cnt--;
        }

        if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
                netif_wake_queue(ndev);

        spin_unlock(&rnet->lock);
}

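/*
 * Open the interface: claim the doorbell range and the inbound/outbound
 * mailboxes, prime the RX ring, then send a JOIN doorbell to each known
 * peer.
 */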
static int rionet_open(struct net_device *ndev)
{
        int i, rc = 0;
        struct rionet_peer *peer, *tmp;
        struct rionet_private *rnet = netdev_priv(ndev);

        if (netif_msg_ifup(rnet))
                printk(KERN_INFO "%s: open\n", DRV_NAME);

        if ((rc = rio_request_inb_dbell(rnet->mport,
                                        (void *)ndev,
                                        RIONET_DOORBELL_JOIN,
                                        RIONET_DOORBELL_LEAVE,
                                        rionet_dbell_event)) < 0)
                goto out;

        if ((rc = rio_request_inb_mbox(rnet->mport,
                                       (void *)ndev,
                                       RIONET_MAILBOX,
                                       RIONET_RX_RING_SIZE,
                                       rionet_inb_msg_event)) < 0)
                goto out;

        if ((rc = rio_request_outb_mbox(rnet->mport,
                                        (void *)ndev,
                                        RIONET_MAILBOX,
                                        RIONET_TX_RING_SIZE,
                                        rionet_outb_msg_event)) < 0)
                goto out;

        /* Initialize inbound message ring */
        for (i = 0; i < RIONET_RX_RING_SIZE; i++)
                rnet->rx_skb[i] = NULL;
        rnet->rx_slot = 0;
        rionet_rx_fill(ndev, 0);

        rnet->tx_slot = 0;
        rnet->tx_cnt = 0;
        rnet->ack_slot = 0;

        netif_carrier_on(ndev);
        netif_start_queue(ndev);

        list_for_each_entry_safe(peer, tmp,
                                 &nets[rnet->mport->id].peers, node) {
                if (!(peer->res = rio_request_outb_dbell(peer->rdev,
                                        RIONET_DOORBELL_JOIN,
                                        RIONET_DOORBELL_LEAVE))) {
                        printk(KERN_ERR "%s: error requesting doorbells\n",
                               DRV_NAME);
                        continue;
                }

                /* Send a join message */
                rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
        }

      out:
        return rc;
}

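/*
 * Close the interface: free the RX ring, send LEAVE doorbells to active
 * peers, and release doorbell and mailbox resources.
 */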
static int rionet_close(struct net_device *ndev)
{
        struct rionet_private *rnet = netdev_priv(ndev);
        struct rionet_peer *peer, *tmp;
        int i;

        if (netif_msg_ifup(rnet))
                printk(KERN_INFO "%s: close %s\n", DRV_NAME, ndev->name);

        netif_stop_queue(ndev);
        netif_carrier_off(ndev);

        for (i = 0; i < RIONET_RX_RING_SIZE; i++)
                kfree_skb(rnet->rx_skb[i]);

        list_for_each_entry_safe(peer, tmp,
                                 &nets[rnet->mport->id].peers, node) {
                if (nets[rnet->mport->id].active[peer->rdev->destid]) {
                        rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
                        nets[rnet->mport->id].active[peer->rdev->destid] = NULL;
                }
                rio_release_outb_dbell(peer->rdev, peer->res);
        }

        rio_release_inb_dbell(rnet->mport, RIONET_DOORBELL_JOIN,
                              RIONET_DOORBELL_LEAVE);
        rio_release_inb_mbox(rnet->mport, RIONET_MAILBOX);
        rio_release_outb_mbox(rnet->mport, RIONET_MAILBOX);

        return 0;
}

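/* Device removal: unregister the netdev and tear down per-net state. */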
static void rionet_remove(struct rio_dev *rdev)
{
        struct net_device *ndev = rio_get_drvdata(rdev);
        unsigned char netid = rdev->net->hport->id;
        struct rionet_peer *peer, *tmp;

        unregister_netdev(ndev);

        free_pages((unsigned long)nets[netid].active, get_order(sizeof(void *) *
                        RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size)));
        nets[netid].active = NULL;

        list_for_each_entry_safe(peer, tmp, &nets[netid].peers, node) {
                list_del(&peer->node);
                kfree(peer);
        }

        free_netdev(ndev);
}

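/* ethtool callbacks: driver identification and message-level control. */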
static void rionet_get_drvinfo(struct net_device *ndev,
                               struct ethtool_drvinfo *info)
{
        struct rionet_private *rnet = netdev_priv(ndev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, "n/a", sizeof(info->fw_version));
        strlcpy(info->bus_info, rnet->mport->name, sizeof(info->bus_info));
}

static u32 rionet_get_msglevel(struct net_device *ndev)
{
        struct rionet_private *rnet = netdev_priv(ndev);

        return rnet->msg_enable;
}

static void rionet_set_msglevel(struct net_device *ndev, u32 value)
{
        struct rionet_private *rnet = netdev_priv(ndev);

        rnet->msg_enable = value;
}

static const struct ethtool_ops rionet_ethtool_ops = {
        .get_drvinfo = rionet_get_drvinfo,
        .get_msglevel = rionet_get_msglevel,
        .set_msglevel = rionet_set_msglevel,
        .get_link = ethtool_op_get_link,
};

static const struct net_device_ops rionet_netdev_ops = {
        .ndo_open               = rionet_open,
        .ndo_stop               = rionet_close,
        .ndo_start_xmit         = rionet_start_xmit,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
};

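/*
 * One-time netdev setup for an mport: allocate the active-peer table,
 * derive the MAC address from the local RapidIO device ID, and register
 * the network device.
 */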
static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
        int rc = 0;
        struct rionet_private *rnet;
        u16 device_id;
        const size_t rionet_active_bytes = sizeof(void *) *
                                RIO_MAX_ROUTE_ENTRIES(mport->sys_size);

        nets[mport->id].active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
                                                get_order(rionet_active_bytes));
        if (!nets[mport->id].active) {
                rc = -ENOMEM;
                goto out;
        }
        memset((void *)nets[mport->id].active, 0, rionet_active_bytes);

        /* Set up private area */
        rnet = netdev_priv(ndev);
        rnet->mport = mport;

        /* Set the default MAC address */
        device_id = rio_local_get_device_id(mport);
        ndev->dev_addr[0] = 0x00;
        ndev->dev_addr[1] = 0x01;
        ndev->dev_addr[2] = 0x00;
        ndev->dev_addr[3] = 0x01;
        ndev->dev_addr[4] = device_id >> 8;
        ndev->dev_addr[5] = device_id & 0xff;

        ndev->netdev_ops = &rionet_netdev_ops;
        ndev->mtu = RIO_MAX_MSG_SIZE - 14;
        ndev->features = NETIF_F_LLTX;
        SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);

        spin_lock_init(&rnet->lock);
        spin_lock_init(&rnet->tx_lock);

        rnet->msg_enable = RIONET_DEFAULT_MSGLEVEL;

        rc = register_netdev(ndev);
        if (rc != 0)
                goto out;

        printk(KERN_INFO "%s: %s %s Version %s, MAC %pM, %s\n",
               ndev->name,
               DRV_NAME,
               DRV_DESC,
               DRV_VERSION,
               ndev->dev_addr,
               mport->name);

      out:
        return rc;
}

static unsigned long net_table[RIONET_MAX_NETS/sizeof(unsigned long) + 1];

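/*
 * Probe: on the first device seen for a given network, verify that the
 * local port is rionet capable and set up the netdev; then add each
 * capable remote device to that network's peer list.
 */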
static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
        int rc = -ENODEV;
        u32 lsrc_ops, ldst_ops;
        struct rionet_peer *peer;
        struct net_device *ndev = NULL;
        unsigned char netid = rdev->net->hport->id;
        int oldnet;

        if (netid >= RIONET_MAX_NETS)
                return rc;

        oldnet = test_and_set_bit(netid, net_table);

        /*
         * First time through, make sure local device is rionet
         * capable, setup netdev (will be skipped on later probes)
         */
        if (!oldnet) {
                rio_local_read_config_32(rdev->net->hport, RIO_SRC_OPS_CAR,
                                         &lsrc_ops);
                rio_local_read_config_32(rdev->net->hport, RIO_DST_OPS_CAR,
                                         &ldst_ops);
                if (!is_rionet_capable(lsrc_ops, ldst_ops)) {
                        printk(KERN_ERR
                               "%s: local device %s is not network capable\n",
                               DRV_NAME, rdev->net->hport->name);
                        goto out;
                }

                /* Allocate our net_device structure */
                ndev = alloc_etherdev(sizeof(struct rionet_private));
                if (ndev == NULL) {
                        rc = -ENOMEM;
                        goto out;
                }
                nets[netid].ndev = ndev;
                rc = rionet_setup_netdev(rdev->net->hport, ndev);
                INIT_LIST_HEAD(&nets[netid].peers);
                nets[netid].nact = 0;
        } else if (nets[netid].ndev == NULL)
                goto out;

        /*
         * If the remote device has mailbox/doorbell capabilities,
         * add it to the peer list.
         */
        if (dev_rionet_capable(rdev)) {
                if (!(peer = kmalloc(sizeof(struct rionet_peer), GFP_KERNEL))) {
                        rc = -ENOMEM;
                        goto out;
                }
                peer->rdev = rdev;
                list_add_tail(&peer->node, &nets[netid].peers);
        }

        rio_set_drvdata(rdev, nets[netid].ndev);

      out:
        return rc;
}

static struct rio_device_id rionet_id_table[] = {
        {RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)}
};

static struct rio_driver rionet_driver = {
        .name = "rionet",
        .id_table = rionet_id_table,
        .probe = rionet_probe,
        .remove = rionet_remove,
};

static int __init rionet_init(void)
{
        return rio_register_driver(&rionet_driver);
}

static void __exit rionet_exit(void)
{
        rio_unregister_driver(&rionet_driver);
}

late_initcall(rionet_init);
module_exit(rionet_exit);