/******************************************************************************

  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA 02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************

  Few modifications for Realtek's Wi-Fi drivers by
  Andrea Merello <andreamrl@tiscali.it>

  A special thanks goes to Realtek for their support !

******************************************************************************/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <linux/if_vlan.h>

#include "rtllib.h"
/*

802.11 Data Frame


802.11 frame_control for data frames - 2 bytes
     ,-----------------------------------------------------------------------------------------.
bits | 0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |  8  |  9  |  a  |  b  |  c  |  d  |  e   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val  | 0  |  0  |  0  |  1  |  x  |  0  |  0  |  0  |  1  |  0  |  x  |  x  |  x  |  x  |  x   |
     |----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc | ^-ver-^  |  ^type-^  |  ^-----subtype-----^  | to  |from |more |retry| pwr |more |wep   |
     |          |           | x=0 data,x=1 data+ack | DS  | DS  |frag |     | mgm |data |      |
     '-----------------------------------------------------------------------------------------'
                                                    /\
                                                    |
802.11 Data Frame                                   |
           ,--------- 'ctrl' expands to >-----------'
          |
      ,--'---,-------------------------------------------------------------.
Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
      |------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
      |      | tion | (BSSID) |         |         | ence |  data   |      |
      `--------------------------------------------------|         |------'
Total: 28 non-data bytes                                 `----.----'
                                                              |
       .- 'Frame data' expands to <---------------------------'
       |
       V
      ,---------------------------------------------------.
Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
      |------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type |   IP    |
      | DSAP | SSAP |         |          |      | Packet  |
      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
      `-----------------------------------------|         |
Total: 8 non-data bytes                         `----.----'
                                                     |
       .- 'IP Packet' expands, if WEP enabled, to <--'
       |
       V
      ,-----------------------.
Bytes |  4  |   0-2296  |  4  |
      |-----|-----------|-----|
Desc. | IV  | Encrypted | ICV |
      |     | IP Packet |     |
      `-----------------------'
Total: 8 non-data bytes


802.3 Ethernet Data Frame

      ,-----------------------------------------.
Bytes |   6   |   6   |  2   |  Variable |   4  |
      |-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet |  fcs |
      |  MAC  |  MAC  |      |           |      |
      `-----------------------------------------'
Total: 18 non-data bytes

In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts.  The first fragment contains the SNAP header and the
remaining packets are just data.

If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP),
so if you have 1500 bytes of payload with ieee->fts set to 500, without
encryption it will take 3 frames.  With WEP it will take 4 frames, as the
payload of each frame is reduced to 492 bytes.
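
For illustration only (a sketch of the arithmetic; the code below also
subtracts the 802.11 header and, when configured, the FCS from each fragment):
without encryption 1500 / 500 = 3 fragments exactly, while with WEP each
fragment carries 500 - 8 = 492 payload bytes and 1500 / 492 = 3.05, which
rounds up to 4 fragments.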
* SKB visualization
*
*  ,- skb->data
* |
* |    ETHERNET HEADER        ,-<-- PAYLOAD
* |                           |     14 bytes from skb->data
* |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
* |                       | | |
* |,-Dest.--. ,--Src.---. | | |
* |  6 bytes| | 6 bytes | | | |
* v         | |         | | | |
* 0         | v       1 | v | v           2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
*     ^     | ^         | ^ |
*     |     | |         | | |
*     |     | |         | `T' <---- 2 bytes for Type
*     |     | |         |
*     |     | '---SNAP--' <-------- 6 bytes for SNAP
*     |     |
*     `-IV--' <-------------------- 4 bytes for IV (WEP)
*
*      SNAP HEADER
*
*/
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
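
/* Prepend an 802.2 LLC/SNAP header (DSAP/SSAP 0xAA, UI control field) plus
 * the original EtherType to the buffer at 'data'.  Returns the number of
 * bytes written (SNAP_SIZE plus the two-byte type field). */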
inline int rtllib_put_snap(u8 *data, u16 h_proto)
{
	struct rtllib_snap_hdr *snap;
	u8 *oui;

	snap = (struct rtllib_snap_hdr *)data;
	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;

	if (h_proto == 0x8137 || h_proto == 0x80f3)
		oui = P802_1H_OUI;
	else
		oui = RFC1042_OUI;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];

	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
			    int hdr_len)
{
	struct rtllib_crypt_data *crypt = NULL;
	int res;

	crypt = ieee->crypt[ieee->tx_keyidx];

	if (!(crypt && crypt->ops)) {
		printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
		return -1;
	}
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);

	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}
void rtllib_txb_free(struct rtllib_txb *txb)
{
	if (unlikely(!txb))
		return;
	kfree(txb);
}
static struct rtllib_txb *rtllib_alloc_txb(int nr_frags, int txb_size,
					   gfp_t gfp_mask)
{
	struct rtllib_txb *txb;
	int i;

	txb = kmalloc(sizeof(struct rtllib_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	memset(txb, 0, sizeof(struct rtllib_txb));
	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}
	return txb;
}
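
/* Map the IPv4 TOS precedence of the outgoing frame to an 802.11 user
 * priority (0-7); non-IPv4 frames get priority 0 (best effort). */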
static int rtllib_classify(struct sk_buff *skb, u8 bIsAmsdu)
{
	struct ethhdr *eth;
	struct iphdr *ip;

	eth = (struct ethhdr *)skb->data;
	if (eth->h_proto != htons(ETH_P_IP))
		return 0;

	RTLLIB_DEBUG_DATA(RTLLIB_DL_DATA, skb->data, skb->len);
	ip = ip_hdr(skb);
	switch (ip->tos & 0xfc) {
	case 0x20:
		return 2;
	case 0x40:
		return 1;
	case 0x60:
		return 3;
	case 0x80:
		return 4;
	case 0xa0:
		return 5;
	case 0xc0:
		return 6;
	case 0xe0:
		return 7;
	default:
		return 0;
	}
}
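
/* Decide whether this QoS data frame may be sent as part of an A-MPDU
 * aggregate and, if so, fill in the aggregation parameters in the tx
 * control block, possibly kicking off an ADDBA exchange first. */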
static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
				    struct sk_buff *skb,
				    struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
	struct tx_ts_record *pTxTs = NULL;
	struct rtllib_hdr_1addr *hdr = (struct rtllib_hdr_1addr *)skb->data;

	if (rtllib_act_scanning(ieee, false))
		return;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;
	if (is_multicast_ether_addr(hdr->addr1) ||
	    is_broadcast_ether_addr(hdr->addr1))
		return;

	if (tcb_desc->bdhcp || ieee->CntAfterLink < 2)
		return;

	if (pHTInfo->IOTAction & HT_IOT_ACT_TX_NO_AGGREGATION)
		return;

	if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
		return;
	if (pHTInfo->bCurrentAMPDUEnable) {
		if (!GetTs(ieee, (struct ts_common_info **)(&pTxTs), hdr->addr1,
			   skb->priority, TX_DIR, true)) {
			printk(KERN_INFO "%s: can't get TS\n", __func__);
			return;
		}
		if (pTxTs->TxAdmittedBARecord.bValid == false) {
			/* Do not start ADDBA while keys are not yet in place
			 * or DHCP is in flight. */
			if (ieee->wpa_ie_len && (ieee->pairwise_key_type ==
			    KEY_TYPE_NA)) {
				;
			} else if (tcb_desc->bdhcp == 1) {
				;
			} else if (!pTxTs->bDisable_AddBa) {
				TsStartAddBaProcess(ieee, pTxTs);
			}
			goto FORCED_AGG_SETTING;
		} else if (pTxTs->bUsingBa == false) {
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum,
			    (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}
		if (ieee->iw_mode == IW_MODE_INFRA) {
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	switch (pHTInfo->ForcedAMPDUMode) {
	case HT_AGG_AUTO:
		break;

	case HT_AGG_FORCE_ENABLE:
		tcb_desc->bAMPDUEnable = true;
		tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
		tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
		break;

	case HT_AGG_FORCE_DISABLE:
		tcb_desc->bAMPDUEnable = false;
		tcb_desc->ampdu_density = 0;
		tcb_desc->ampdu_factor = 0;
		break;
	}
	return;
}
static void rtllib_qurey_ShortPreambleMode(struct rtllib_device *ieee,
					   struct cb_desc *tcb_desc)
{
	tcb_desc->bUseShortPreamble = false;
	if (tcb_desc->data_rate == 2)
		return;
	else if (ieee->current_network.capability &
		 WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	return;
}
static void rtllib_query_HTCapShortGI(struct rtllib_device *ieee,
				      struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (pHTInfo->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((pHTInfo->bCurBW40MHz == true) && pHTInfo->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((pHTInfo->bCurBW40MHz == false) && pHTInfo->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
				       struct cb_desc *tcb_desc)
{
	struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!pHTInfo->bCurrentHTSupport || !pHTInfo->bEnableHT)
		return;

	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	if ((tcb_desc->data_rate & 0x80) == 0)
		return;
	if (pHTInfo->bCurBW40MHz && pHTInfo->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
	return;
}
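
/* Choose the RTS/CTS protection settings for this frame.  Legacy (non-HT)
 * modes use the RTS threshold and the network's protection flag; HT modes
 * additionally honour IOT workarounds, the current HT operation mode and
 * whether the frame is part of an A-MPDU. */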
static void rtllib_query_protectionmode(struct rtllib_device *ieee,
					struct cb_desc *tcb_desc,
					struct sk_buff *skb)
{
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;

	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data+16))
		return;

	if (ieee->mode < IEEE_N_24G) {
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		return;
	} else {
		struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;

		while (true) {
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			} else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS |
				   HT_IOT_ACT_PURE_N_MODE)) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 ||
				     HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = false;
				break;
			}
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->iw_mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
static void rtllib_txrate_selectmode(struct rtllib_device *ieee,
				     struct cb_desc *tcb_desc)
{
	if (ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if (ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;
	if (!tcb_desc->bTxDisableRateFallBack ||
	    !tcb_desc->bTxUseDriverAssingedRate) {
		if (ieee->iw_mode == IW_MODE_INFRA ||
		    ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
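
/* Return the next sequence number for a unicast QoS data frame from its TS
 * record and advance the per-TS counter modulo 4096; multicast, broadcast
 * and non-QoS frames get 0. */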
u16 rtllib_query_seqnum(struct rtllib_device *ieee, struct sk_buff *skb,
			u8 *dst)
{
	u16 seqnum = 0;

	if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
		return 0;
	if (IsQoSDataFrame(skb->data)) {
		struct tx_ts_record *pTS = NULL;

		if (!GetTs(ieee, (struct ts_common_info **)(&pTS), dst,
			   skb->priority, TX_DIR, true))
			return 0;
		seqnum = pTS->TxCurSeq;
		pTS->TxCurSeq = (pTS->TxCurSeq+1)%4096;
		return seqnum;
	}
	return 0;
}
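
/* Downgrade the skb to the next lower WMM access category
 * (VO -> VI -> BE -> BK).  Returns 0 on success, or -1 when the frame is
 * already at the lowest category. */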
static int wme_downgrade_ac(struct sk_buff *skb)
{
	switch (skb->priority) {
	case 6:
	case 7:
		skb->priority = 5; /* VO -> VI */
		return 0;
	case 4:
	case 5:
		skb->priority = 3; /* VI -> BE */
		return 0;
	case 0:
	case 3:
		skb->priority = 1; /* BE -> BK */
		return 0;
	default:
		return -1;
	}
}
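
/* Main transmit path: classify the Ethernet frame, build the 802.11 header,
 * fragment and (optionally) encrypt the payload into a TXB, then hand it to
 * the softmac queue or to the driver's hard_start_xmit handler.  Called with
 * the skb still holding its Ethernet header. */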
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct rtllib_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool bdhcp = false;
	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	     IEEE_SOFTMAC_TX_QUEUE)) ||
	    ((!ieee->softmac_data_hard_start_xmit &&
	     (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}
	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate "
				       "TXB\n", ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = skb->len;
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}
		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data + 14);
				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					/* DHCP: UDP port 68 <-> port 67 */
					if (((((u8 *)udp)[1] == 68) &&
					     (((u8 *)udp)[3] == 67)) ||
					    ((((u8 *)udp)[1] == 67) &&
					     (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP "
				       "Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					ieee->current_network.tim.tim_count;
			}
		}
		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt[ieee->tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			  ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		}
		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;
		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			 * Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_broadcast_ether_addr(header.addr1) ||
			       is_multicast_ether_addr(header.addr1);
		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}
		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client verify acm is not set for this ac */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n",
				       skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encryption
		 * pre/postfix */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_prefix_len +
					  crypt->ops->extra_postfix_len;
		}
		/* Number of fragments is the total bytes divided by the
		 * payload per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;
		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					rtllib_query_seqnum(ieee, skb_frag,
							    header.addr1);
				frag_hdr->seq_ctl =
					cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			    (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}
		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}
 success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}

			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp == true) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					  MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}

				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;
}
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}