/* drivers/net/wireless/ipw2x00/libipw_tx.c */

/******************************************************************************

  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Intel Linux Wireless <ilw@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>

#include "libipw.h"

/*

802.11 Data Frame

      ,-------------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |  4   |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |   SA    | Sequ |  Frame  | fcs  |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                 `----.----'
                                                              |
       .- 'Frame data' expands, if WEP enabled, to <----------'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | Packet    |     |
      `-----|           |-----'
            `-----.-----'
                  |
       .- 'Encrypted Packet' expands to
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type |   IP    |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `----------------------------------------------------
Total: 8 non-data bytes

802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |  4   |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining fragments carry only data.

If encryption is enabled, each fragment's payload size is reduced by enough
space to add the prefix and postfix (IV and ICV, totalling 8 bytes in the case
of WEP).  So 1500 bytes of payload with ieee->fts set to 500 takes 3 frames
without encryption; with WEP it takes 4 frames, since the payload of each frame
is reduced to 492 bytes.  (A short sketch of this arithmetic follows this
comment block.)

 * SKB visualization
 *
 *  ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | | |
 * 0         | v       1 | v | v           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *      SNAP HEADER
 */
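
/*
 * A minimal illustrative sketch, not part of the original driver: the
 * fragment count from the example in the comment above, using the same
 * simplification (the per-fragment 802.11 header and optional FCS are
 * ignored; libipw_xmit() below does the full calculation inline).  The
 * function and parameter names here are hypothetical.  With payload_len =
 * 1500 and frag_size = 500 this returns 3 for crypt_overhead = 0 and 4 for
 * crypt_overhead = 8 (WEP IV + ICV, leaving 492 usable bytes per fragment).
 */
static inline int libipw_example_nr_frags(int payload_len, int frag_size,
					  int crypt_overhead)
{
	/* Usable payload per fragment once the encryption prefix/postfix
	 * bytes are set aside. */
	int bytes_per_frag = frag_size - crypt_overhead;

	/* Round up: a non-zero remainder costs one extra fragment. */
	return DIV_ROUND_UP(payload_len, bytes_per_frag);
}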

static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

static int libipw_copy_snap(u8 * data, __be16 h_proto)
{
	struct libipw_snap_hdr *snap;
	u8 *oui;

	snap = (struct libipw_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));

	return SNAP_SIZE + sizeof(u16);
}
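
/*
 * Worked example (illustrative, not in the original source): for an IPv4
 * frame the eight bytes written by libipw_copy_snap() are
 * AA AA 03 00 00 00 08 00, i.e. the LLC DSAP/SSAP/control fields, the
 * RFC 1042 OUI, and the big-endian EtherType ETH_P_IP (0x0800); the
 * return value is SNAP_SIZE + sizeof(u16) = 8.
 */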

static int libipw_encrypt_fragment(struct libipw_device *ieee,
				   struct sk_buff *frag, int hdr_len)
{
	struct lib80211_crypt_data *crypt =
		ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
	int res;

	if (crypt == NULL)
		return -1;

	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void libipw_txb_free(struct libipw_txb *txb)
{
	int i;
	if (unlikely(!txb))
		return;
	for (i = 0; i < txb->nr_frags; i++)
		if (txb->fragments[i])
			dev_kfree_skb_any(txb->fragments[i]);
	kfree(txb);
}

static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
					   int headroom, gfp_t gfp_mask)
{
	struct libipw_txb *txb;
	int i;
	txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct libipw_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
						    gfp_mask);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		skb_reserve(txb->fragments[i], headroom);
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

static int libipw_classify(struct sk_buff *skb)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
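
/*
 * Example (illustrative) for libipw_classify() above: a TOS byte of 0x40
 * (precedence 2) maps to user priority 1, while a DSCP EF marking
 * (TOS 0xb8) matches none of the cases and falls back to best effort (0).
 */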

/* Incoming skb is converted to a txb which consists of
 * a block of 802.11 fragment packets (stored as skbs) */
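/* Note (illustrative summary; libipw.h has the definitive definition): the
 * txb handed to the driver carries nr_frags skbs in fragments[], plus the
 * frag_size, payload_size, encrypted and rts_included fields that are
 * filled in below. */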
netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct libipw_device *ieee = netdev_priv(dev);
	struct libipw_txb *txb = NULL;
	struct libipw_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
	    rts_required;
	unsigned long flags;
	int encrypt, host_encrypt, host_encrypt_msdu;
	__be16 ether_type;
	int bytes, fc, hdr_len;
	struct sk_buff *skb_frag;
	struct libipw_hdr_3addrqos header = {/* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct lib80211_crypt_data *crypt;
	int priority = skb->priority;
	int snapped = 0;

	if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
		return NETDEV_TX_BUSY;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if (!ieee->hard_start_xmit) {
		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
		goto success;
	}

	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
		printk(KERN_WARNING "%s: skb too small (%d).\n",
		       ieee->dev->name, skb->len);
		goto success;
	}

	ether_type = ((struct ethhdr *)skb->data)->h_proto;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
	    ieee->sec.encrypt;

	host_encrypt = ieee->host_encrypt && encrypt && crypt;
	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;

	if (!encrypt && ieee->ieee802_1x &&
	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
		dev->stats.tx_dropped++;
		goto success;
	}

	/* Save source and destination addresses */
	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);

	if (host_encrypt)
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
		    IEEE80211_FCTL_PROTECTED;
	else
		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;

	if (ieee->iw_mode == IW_MODE_INFRA) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, dest, ETH_ALEN);
	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		memcpy(header.addr1, dest, ETH_ALEN);
		memcpy(header.addr2, src, ETH_ALEN);
		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
	}
	hdr_len = LIBIPW_3ADDR_LEN;

	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
		fc |= IEEE80211_STYPE_QOS_DATA;
		hdr_len += 2;

		skb->priority = libipw_classify(skb);
		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
	}
	header.frame_ctl = cpu_to_le16(fc);

	/* Advance the SKB to the start of the payload */
	skb_pull(skb, sizeof(struct ethhdr));

	/* Determine total amount of storage required for TXB packets */
	bytes = skb->len + SNAP_SIZE + sizeof(u16);

	/* Encrypt msdu first on the whole data packet. */
	if ((host_encrypt || host_encrypt_msdu) &&
	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
		int res = 0;
		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		struct sk_buff *skb_new = dev_alloc_skb(len);

		if (unlikely(!skb_new))
			goto failed;

		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
		memcpy(skb_put(skb_new, hdr_len), &header, hdr_len);
		snapped = 1;
		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
				 ether_type);
		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
		if (res < 0) {
			LIBIPW_ERROR("msdu encryption failed\n");
			dev_kfree_skb_any(skb_new);
			goto failed;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		bytes += crypt->ops->extra_msdu_prefix_len +
		    crypt->ops->extra_msdu_postfix_len;
		skb_pull(skb, hdr_len);
	}

	if (host_encrypt || ieee->host_open_frag) {
		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (is_multicast_ether_addr(dest) ||
		    is_broadcast_ether_addr(dest))
			frag_size = MAX_FRAG_THRESHOLD;
		else
			frag_size = ieee->fts;

		/* Determine the amount of payload per fragment.  Regardless of
		 * whether this stack is providing the full 802.11 header, one
		 * will eventually be affixed to this fragment -- so we must
		 * account for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			bytes_per_frag -= LIBIPW_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (host_encrypt)
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
			    crypt->ops->extra_mpdu_postfix_len;

		/* Number of fragments is the total bytes to send divided by
		 * the payload carried per fragment, rounded up */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
	} else {
		nr_frags = 1;
		bytes_per_frag = bytes_last_frag = bytes;
		frag_size = bytes + hdr_len;
	}

	rts_required = (frag_size > ieee->rts
			&& ieee->config & CFG_LIBIPW_RTS);
	if (rts_required)
		nr_frags++;

	/* When we allocate the TXB we allocate enough space for the reserve
	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
	 * postfix, header, FCS, etc.) */
	txb = libipw_alloc_txb(nr_frags, frag_size,
			       ieee->tx_headroom, GFP_ATOMIC);
	if (unlikely(!txb)) {
		printk(KERN_WARNING "%s: Could not allocate TXB\n",
		       ieee->dev->name);
		goto failed;
	}
	txb->encrypted = encrypt;
	if (host_encrypt)
		txb->payload_size = frag_size * (nr_frags - 1) +
		    bytes_last_frag;
	else
		txb->payload_size = bytes;

	if (rts_required) {
		skb_frag = txb->fragments[0];
		frag_hdr =
		    (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);

		/*
		 * Set header frame_ctl to the RTS.
		 */
		header.frame_ctl =
		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
		memcpy(frag_hdr, &header, hdr_len);

		/*
		 * Restore header frame_ctl to the original data setting.
		 */
		header.frame_ctl = cpu_to_le16(fc);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);

		txb->rts_included = 1;
		i = 1;
	} else
		i = 0;

	for (; i < nr_frags; i++) {
		skb_frag = txb->fragments[i];

		if (host_encrypt)
			skb_reserve(skb_frag,
				    crypt->ops->extra_mpdu_prefix_len);

		frag_hdr =
		    (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
		memcpy(frag_hdr, &header, hdr_len);

		/* If this is not the last fragment, then add the MOREFRAGS
		 * bit to the frame control */
		if (i != nr_frags - 1) {
			frag_hdr->frame_ctl =
			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
			bytes = bytes_per_frag;
		} else {
			/* The last fragment takes the remaining length */
			bytes = bytes_last_frag;
		}

		if (i == 0 && !snapped) {
			libipw_copy_snap(skb_put
					 (skb_frag, SNAP_SIZE + sizeof(u16)),
					 ether_type);
			bytes -= SNAP_SIZE + sizeof(u16);
		}

		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);

		/* Advance the SKB... */
		skb_pull(skb, bytes);

		/* Encryption routine will move the header forward in order
		 * to insert the IV between the header and the payload */
		if (host_encrypt)
			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);

		if (ieee->config &
		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
			skb_put(skb_frag, 4);
	}

      success:
	spin_unlock_irqrestore(&ieee->lock, flags);

	dev_kfree_skb_any(skb);

	if (txb) {
		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
		if (ret == NETDEV_TX_OK) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			return NETDEV_TX_OK;
		}

		libipw_txb_free(txb);
	}

	return NETDEV_TX_OK;

      failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	dev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}
EXPORT_SYMBOL(libipw_xmit);

EXPORT_SYMBOL(libipw_txb_free);
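
/*
 * Illustrative sketch, not part of libipw itself: roughly how a driver
 * built on this library might hook the two halves together.  All "foo_*"
 * names are hypothetical; the callback signature mirrors the
 * (*ieee->hard_start_xmit)(txb, dev, priority) call in libipw_xmit()
 * above, and the exact member types live in libipw.h.
 */
static netdev_tx_t foo_tx(struct libipw_txb *txb, struct net_device *dev,
			  int pri)
{
	/* Queue each txb->fragments[i] to the hardware here.  Returning
	 * anything other than NETDEV_TX_OK makes libipw_xmit() free the
	 * txb and drop the frame. */
	return NETDEV_TX_OK;
}

static const struct net_device_ops foo_netdev_ops = {
	/* libipw_xmit() takes the 802.3 skb and builds the txb */
	.ndo_start_xmit = libipw_xmit,
};

static void foo_setup_tx(struct libipw_device *ieee, struct net_device *dev)
{
	dev->netdev_ops = &foo_netdev_ops;
	ieee->hard_start_xmit = foo_tx;
}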