/* drivers/staging/rtl8192e/rtllib_tx.c */
/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  Few modifications for Realtek's Wi-Fi drivers by
  Andrea Merello <andreamrl@tiscali.it>

  A special thanks goes to Realtek for their support !

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"
/*

802.11 Data Frame


802.11 frame_control for data frames - 2 bytes
     ,-----------------------------------------------------------------------------------------.
bits | 0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  a  |  b  |  c  |  d  |  e   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val  | 0  |  0  |  0  |  1  |  x  |  0  |  0  |  0  |  1  |  0  |  x  |  x  |  x  |  x  |  x   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc | ^-ver-^  |  ^type-^  |  ^-----subtype-----^  | to  |from |more |retry| pwr |more |wep   |
     |          |           | x=0 data,x=1 data+ack | DS  | DS  |frag |     | mgm |data |      |
     '-----------------------------------------------------------------------------------------'
                                           /\
                                           |
802.11 Data Frame                          |
          ,--------- 'ctrl' expands to >---'
          |
      ,---'--,-------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4   |
      |------|------|---------|---------|---------|------|---------|-------|
Desc. | ctrl | dura |  DA/RA  |   TA    |   SA    | Sequ |  Frame  |  fcs  |
      |      | tion | (BSSID) |         |         | ence |  data   |       |
      `--------------------------------------------------|         |-------'
Total: 28 non-data bytes                                  `----.----'
                                                               |
       .- 'Frame data' expands to <---------------------------'
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `-----------------------------------------|         |
Total: 8 non-data bytes                         `----.----'
                                                     |
       .- 'IP Packet' expands, if WEP enabled, to <--'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | IP Packet |     |
      `-----------------------'
Total: 8 non-data bytes


802.3 Ethernet Data Frame

      ,------------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4   |
      |-------|-------|------|-----------|-------|
Desc. | Dest. | Source| Type | IP Packet |  fcs  |
      |  MAC  |  MAC  |      |           |       |
      `------------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining fragments are just data.

If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP),
so 1500 bytes of payload with ieee->fts set to 500 take 3 frames without
encryption, but 4 frames with WEP, as the payload of each frame is reduced to
492 bytes.
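
A quick check of that arithmetic (illustrative note, not part of the original
comment): WEP adds a 4-byte IV prefix and a 4-byte ICV postfix per fragment,
so the per-fragment payload drops from 500 to 500 - 8 = 492 bytes, and
1500 bytes then need ceil(1500 / 492) = 4 fragments instead of
ceil(1500 / 500) = 3.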

 * SKB visualization
 *
 *  ,- skb->data
 * |
 * |    ETHERNET HEADER        ,-<-- PAYLOAD
 * |                           |     14 bytes from skb->data
 * |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
 * |                       | | |
 * |,-Dest.--. ,--Src.---. | | |
 * |  6 bytes| | 6 bytes | | | |
 * v         | |         | | |
 * 0         | v       1 | v |           2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
 *     ^     | ^         | ^ |
 *     |     | |         | | |
 *     |     | |         | `T' <---- 2 bytes for Type
 *     |     | |         |
 *     |     | '---SNAP--' <-------- 6 bytes for SNAP
 *     |     |
 *     `-IV--' <-------------------- 4 bytes for IV (WEP)
 *
 *      SNAP HEADER
 *
 */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };

inline int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
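
/* Illustrative example (added note, not from the original source): for a
 * plain IPv4 frame, h_proto is ETH_P_IP (0x0800), which selects RFC1042_OUI,
 * so the helper above writes AA AA 03 00 00 00 08 00 -- the 6-byte SNAP
 * header plus the 2-byte type, i.e. SNAP_SIZE + sizeof(u16) = 8 bytes that
 * rtllib_xmit_inter() later accounts for when sizing the first fragment.
 */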

int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct lib80211_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	if (!(crypt && crypt->ops)) {
		printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
		return -1;
	}

	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}

void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}

static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}

static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
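
/* Illustrative example (added note, not from the original source): the switch
 * above only matches TOS values whose low DSCP bits are zero, i.e. the plain
 * IP precedence levels.  A packet with tos == 0xa0 (precedence 5) is mapped
 * to 802.11 user priority 5, while anything that does not match one of the
 * listed values -- including most DSCP codepoints such as EF (0xb8) -- falls
 * through to the default and is sent as best effort (priority 0).
 */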

static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1) ||
	    is_broadcast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			printk(KERN_INFO "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			    KEY_TYPE_NA)) {
				/* pairwise key not installed yet: skip ADDBA */
				;
			} else if (tcb_desc->bdhcp == 1) {
				/* DHCP frame: skip ADDBA */
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
				    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
	return;
}

static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	return;
}

static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}

static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
	return;
}

static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data+16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

		while (true) {
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
				   HT_IOT_ACT_PURE_N_MODE)) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
				     HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}

static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}

u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
		return seqnum;
	}
	return 0;
}

static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}

int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate "
				       "TXB\n", ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = skb->len;
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}

		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);

				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* UDP port 68 = BOOTP/DHCP client,
					 * 67 = BOOTP/DHCP server */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP "
				       "Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			  ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_broadcast_ether_addr(header.addr1) ||
			       is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set
			 * for this ac */
			while (unlikely(ieee->wmm_acm &
			       (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n",
				       skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total bytes divided by the
		 * payload per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				   MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					rtllib_query_seqnum(ieee, skb_frag,
							    header.addr1);
				frag_hdr->seq_ctl =
					cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

 success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp == true) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}

int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
EXPORT_SYMBOL(rtllib_xmit);
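
/*
 * Usage sketch (illustrative, not part of this file): a driver built on top
 * of rtllib would typically hand frames to rtllib_xmit() from its netdev ops,
 * for example:
 *
 *	static const struct net_device_ops rtl8192_netdev_ops = {
 *		.ndo_start_xmit = rtllib_xmit,
 *		...
 *	};
 *
 * rtllib_xmit() clears skb->cb and calls rtllib_xmit_inter(), which builds
 * the fragmented TXB and either queues it through the softmac path or passes
 * it to the driver's hard_start_xmit handler.
 */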