JBD: round commit timer up to avoid uncommitted transaction
[linux/fpc-iii.git] / net / irda / irlan / irlan_eth.c
blob7b6b631f647fbd704add643b8cac365dc11bcb51
1 /*********************************************************************
3 * Filename: irlan_eth.c
4 * Version:
5 * Description:
6 * Status: Experimental.
7 * Author: Dag Brattli <dagb@cs.uit.no>
8 * Created at: Thu Oct 15 08:37:58 1998
9 * Modified at: Tue Mar 21 09:06:41 2000
10 * Modified by: Dag Brattli <dagb@cs.uit.no>
11 * Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
12 * slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
13 * Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
15 * Copyright (c) 1998-2000 Dag Brattli, All Rights Reserved.
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License as
19 * published by the Free Software Foundation; either version 2 of
20 * the License, or (at your option) any later version.
22 * Neither Dag Brattli nor University of Tromsø admit liability nor
23 * provide warranty for any of this software. This material is
24 * provided "AS-IS" and at no charge.
26 ********************************************************************/
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/inetdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/module.h>
33 #include <net/arp.h>
35 #include <net/irda/irda.h>
36 #include <net/irda/irmod.h>
37 #include <net/irda/irlan_common.h>
38 #include <net/irda/irlan_client.h>
39 #include <net/irda/irlan_event.h>
40 #include <net/irda/irlan_eth.h>
/* Forward declarations for the net_device_ops callbacks defined below. */
static int irlan_eth_open(struct net_device *dev);
static int irlan_eth_close(struct net_device *dev);
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static void irlan_eth_set_multicast_list( struct net_device *dev);
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
49 static const struct net_device_ops irlan_eth_netdev_ops = {
50 .ndo_open = irlan_eth_open,
51 .ndo_stop = irlan_eth_close,
52 .ndo_start_xmit = irlan_eth_xmit,
53 .ndo_get_stats = irlan_eth_get_stats,
54 .ndo_set_multicast_list = irlan_eth_set_multicast_list,
55 .ndo_change_mtu = eth_change_mtu,
56 .ndo_validate_addr = eth_validate_addr,
60 * Function irlan_eth_setup (dev)
62 * The network device initialization function.
65 static void irlan_eth_setup(struct net_device *dev)
67 ether_setup(dev);
69 dev->netdev_ops = &irlan_eth_netdev_ops;
70 dev->destructor = free_netdev;
74 * Lets do all queueing in IrTTP instead of this device driver.
75 * Queueing here as well can introduce some strange latency
76 * problems, which we will avoid by setting the queue size to 0.
79 * The bugs in IrTTP and IrLAN that created this latency issue
80 * have now been fixed, and we can propagate flow control properly
81 * to the network layer. However, this requires a minimal queue of
82 * packets for the device.
83 * Without flow control, the Tx Queue is 14 (ttp) + 0 (dev) = 14
84 * With flow control, the Tx Queue is 7 (ttp) + 4 (dev) = 11
85 * See irlan_eth_flow_indication()...
86 * Note : this number was randomly selected and would need to
87 * be adjusted.
88 * Jean II */
89 dev->tx_queue_len = 4;
93 * Function alloc_irlandev
95 * Allocate network device and control block
98 struct net_device *alloc_irlandev(const char *name)
100 return alloc_netdev(sizeof(struct irlan_cb), name,
101 irlan_eth_setup);
105 * Function irlan_eth_open (dev)
107 * Network device has been opened by user
110 static int irlan_eth_open(struct net_device *dev)
112 struct irlan_cb *self = netdev_priv(dev);
114 IRDA_DEBUG(2, "%s()\n", __func__ );
116 /* Ready to play! */
117 netif_stop_queue(dev); /* Wait until data link is ready */
119 /* We are now open, so time to do some work */
120 self->disconnect_reason = 0;
121 irlan_client_wakeup(self, self->saddr, self->daddr);
123 /* Make sure we have a hardware address before we return,
124 so DHCP clients gets happy */
125 return wait_event_interruptible(self->open_wait,
126 !self->tsap_data->connected);
130 * Function irlan_eth_close (dev)
132 * Stop the ether network device, his function will usually be called by
133 * ifconfig down. We should now disconnect the link, We start the
134 * close timer, so that the instance will be removed if we are unable
135 * to discover the remote device after the disconnect.
137 static int irlan_eth_close(struct net_device *dev)
139 struct irlan_cb *self = netdev_priv(dev);
141 IRDA_DEBUG(2, "%s()\n", __func__ );
143 /* Stop device */
144 netif_stop_queue(dev);
146 irlan_close_data_channel(self);
147 irlan_close_tsaps(self);
149 irlan_do_client_event(self, IRLAN_LMP_DISCONNECT, NULL);
150 irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
152 /* Remove frames queued on the control channel */
153 skb_queue_purge(&self->client.txq);
155 self->client.tx_busy = 0;
157 return 0;
161 * Function irlan_eth_tx (skb)
163 * Transmits ethernet frames over IrDA link.
166 static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
167 struct net_device *dev)
169 struct irlan_cb *self = netdev_priv(dev);
170 int ret;
172 /* skb headroom large enough to contain all IrDA-headers? */
173 if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
174 struct sk_buff *new_skb =
175 skb_realloc_headroom(skb, self->max_header_size);
177 /* We have to free the original skb anyway */
178 dev_kfree_skb(skb);
180 /* Did the realloc succeed? */
181 if (new_skb == NULL)
182 return NETDEV_TX_OK;
184 /* Use the new skb instead */
185 skb = new_skb;
188 dev->trans_start = jiffies;
190 /* Now queue the packet in the transport layer */
191 if (self->use_udata)
192 ret = irttp_udata_request(self->tsap_data, skb);
193 else
194 ret = irttp_data_request(self->tsap_data, skb);
196 if (ret < 0) {
198 * IrTTPs tx queue is full, so we just have to
199 * drop the frame! You might think that we should
200 * just return -1 and don't deallocate the frame,
201 * but that is dangerous since it's possible that
202 * we have replaced the original skb with a new
203 * one with larger headroom, and that would really
204 * confuse do_dev_queue_xmit() in dev.c! I have
205 * tried :-) DB
207 /* irttp_data_request already free the packet */
208 self->stats.tx_dropped++;
209 } else {
210 self->stats.tx_packets++;
211 self->stats.tx_bytes += skb->len;
214 return NETDEV_TX_OK;
218 * Function irlan_eth_receive (handle, skb)
220 * This function gets the data that is received on the data channel
223 int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
225 struct irlan_cb *self = instance;
227 if (skb == NULL) {
228 ++self->stats.rx_dropped;
229 return 0;
231 if (skb->len < ETH_HLEN) {
232 IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
233 __func__, skb->len);
234 ++self->stats.rx_dropped;
235 dev_kfree_skb(skb);
236 return 0;
240 * Adopt this frame! Important to set all these fields since they
241 * might have been previously set by the low level IrDA network
242 * device driver
244 skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */
246 self->stats.rx_packets++;
247 self->stats.rx_bytes += skb->len;
249 netif_rx(skb); /* Eat it! */
251 return 0;
255 * Function irlan_eth_flow (status)
257 * Do flow control between IP/Ethernet and IrLAN/IrTTP. This is done by
258 * controlling the queue stop/start.
260 * The IrDA link layer has the advantage to have flow control, and
261 * IrTTP now properly handles that. Flow controlling the higher layers
262 * prevent us to drop Tx packets in here (up to 15% for a TCP socket,
263 * more for UDP socket).
264 * Also, this allow us to reduce the overall transmit queue, which means
265 * less latency in case of mixed traffic.
266 * Jean II
268 void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
270 struct irlan_cb *self;
271 struct net_device *dev;
273 self = (struct irlan_cb *) instance;
275 IRDA_ASSERT(self != NULL, return;);
276 IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
278 dev = self->dev;
280 IRDA_ASSERT(dev != NULL, return;);
282 IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__,
283 flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START",
284 netif_running(dev));
286 switch (flow) {
287 case FLOW_STOP:
288 /* IrTTP is full, stop higher layers */
289 netif_stop_queue(dev);
290 break;
291 case FLOW_START:
292 default:
293 /* Tell upper layers that its time to transmit frames again */
294 /* Schedule network layer */
295 netif_wake_queue(dev);
296 break;
301 * Function set_multicast_list (dev)
303 * Configure the filtering of the device
306 #define HW_MAX_ADDRS 4 /* Must query to get it! */
307 static void irlan_eth_set_multicast_list(struct net_device *dev)
309 struct irlan_cb *self = netdev_priv(dev);
311 IRDA_DEBUG(2, "%s()\n", __func__ );
313 /* Check if data channel has been connected yet */
314 if (self->client.state != IRLAN_DATA) {
315 IRDA_DEBUG(1, "%s(), delaying!\n", __func__ );
316 return;
319 if (dev->flags & IFF_PROMISC) {
320 /* Enable promiscuous mode */
321 IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
323 else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS) {
324 /* Disable promiscuous mode, use normal mode. */
325 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
326 /* hardware_set_filter(NULL); */
328 irlan_set_multicast_filter(self, TRUE);
330 else if (dev->mc_count) {
331 IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
332 /* Walk the address list, and load the filter */
333 /* hardware_set_filter(dev->mc_list); */
335 irlan_set_multicast_filter(self, TRUE);
337 else {
338 IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ );
339 irlan_set_multicast_filter(self, FALSE);
342 if (dev->flags & IFF_BROADCAST)
343 irlan_set_broadcast_filter(self, TRUE);
344 else
345 irlan_set_broadcast_filter(self, FALSE);
349 * Function irlan_get_stats (dev)
351 * Get the current statistics for this device
354 static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
356 struct irlan_cb *self = netdev_priv(dev);
358 return &self->stats;