drivers/net/wireless/ath/carl9170/tx.c

/*
 * Atheros CARL9170 driver
 *
 * 802.11 xmit & status routines
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"

static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
						unsigned int queue)
{
	if (unlikely(modparam_noht)) {
		return queue;
	} else {
		/*
		 * This is just another workaround, until
		 * someone figures out how to get QoS and
		 * AMPDU to play nicely together.
		 */

		return 2;	/* AC_BE */
	}
}

static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
					      struct sk_buff *skb)
{
	return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
}

static bool is_mem_full(struct ar9170 *ar)
{
	return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
		atomic_read(&ar->mem_free_blocks));
}

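/*
 * Per-queue TX accounting: every accepted frame bumps the counters of
 * its AC. A mac80211 queue is stopped as soon as the device memory
 * pool runs low or that queue hits its limit; carl9170_tx_accounting_free
 * wakes the queues again once there is room.
 */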
static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue, i;
	bool mem_full;

	atomic_inc(&ar->tx_total_queued);

	queue = skb_get_queue_mapping(skb);
	spin_lock_bh(&ar->tx_stats_lock);

	/*
	 * The driver has to accept the frame, regardless if the queue is
	 * full to the brim, or not. We have to do the queuing internally,
	 * since mac80211 assumes that a driver which can operate with
	 * aggregated frames does not reject frames for this reason.
	 */
	ar->tx_stats[queue].len++;
	ar->tx_stats[queue].count++;

	mem_full = is_mem_full(ar);
	for (i = 0; i < ar->hw->queues; i++) {
		if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
			ieee80211_stop_queue(ar->hw, i);
			ar->queue_stop_timeout[i] = jiffies;
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);
}

/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
						   struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_vif *vif;
	unsigned int vif_id;

	vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S;

	if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
		return NULL;

	vif = rcu_dereference(ar->vif_priv[vif_id].vif);
	if (unlikely(!vif))
		return NULL;

	/*
	 * Normally we should use wrappers like ieee80211_get_DA to get
	 * the correct peer ieee80211_sta.
	 *
	 * But there is a problem with indirect traffic (broadcasts, or
	 * data which is designated for other stations) in station mode.
	 * The frame will be directed to the AP for distribution and not
	 * to the actual destination.
	 */

	return ieee80211_find_sta(vif, hdr->addr1);
}

static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	sta_info = (struct carl9170_sta_info *) sta->drv_priv;
	if (atomic_dec_return(&sta_info->pending_frames) == 0)
		ieee80211_sta_block_awake(ar->hw, sta, false);

out_rcu:
	rcu_read_unlock();
}

static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
{
	int queue;

	queue = skb_get_queue_mapping(skb);

	spin_lock_bh(&ar->tx_stats_lock);

	ar->tx_stats[queue].len--;

	if (!is_mem_full(ar)) {
		unsigned int i;
		for (i = 0; i < ar->hw->queues; i++) {
			if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
				continue;

			if (ieee80211_queue_stopped(ar->hw, i)) {
				unsigned long tmp;

				tmp = jiffies - ar->queue_stop_timeout[i];
				if (tmp > ar->max_queue_stop_timeout[i])
					ar->max_queue_stop_timeout[i] = tmp;
			}

			ieee80211_wake_queue(ar->hw, i);
		}
	}

	spin_unlock_bh(&ar->tx_stats_lock);

	if (atomic_dec_and_test(&ar->tx_total_queued))
		complete(&ar->tx_flush);
}

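/*
 * Reserve device memory for the frame: mem_free_blocks is decremented
 * by the number of blocks the frame occupies and a slot is claimed in
 * mem_bitmap. The slot number + 1 becomes the frame's cookie, which
 * later identifies it in the firmware's tx status reports.
 */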
static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	unsigned int chunks;
	int cookie = -1;

	atomic_inc(&ar->mem_allocs);

	chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
	if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}

	spin_lock_bh(&ar->mem_lock);
	cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
	spin_unlock_bh(&ar->mem_lock);

	if (unlikely(cookie < 0)) {
		atomic_add(chunks, &ar->mem_free_blocks);
		return -ENOSPC;
	}

	super = (void *) skb->data;

	/*
	 * Cookie #0 serves two special purposes:
	 *  1. The firmware might use it to generate BlockACK frames
	 *     in response to incoming BlockAckReqs.
	 *
	 *  2. Prevent double-free bugs.
	 */
	super->s.cookie = (u8) cookie + 1;
	return 0;
}

static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	int cookie;

	/* make a local copy of the cookie */
	cookie = super->s.cookie;
	/* invalidate cookie */
	super->s.cookie = 0;

	/*
	 * Do an out-of-bounds check on the cookie:
	 *
	 *  * cookie "0" is reserved and won't be assigned to any
	 *    out-going frame. Internally however, it is used to
	 *    mark no longer/un-accounted frames and serves as a
	 *    cheap way of preventing frames from being freed
	 *    twice by _accident_. NB: There is a tiny race...
	 *
	 *  * obviously, the cookie number is limited by the amount
	 *    of available memory blocks, so the number can
	 *    never exceed the mem_blocks count.
	 */
	if (unlikely(WARN_ON_ONCE(cookie == 0) ||
	    WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
		return;

	atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
		   &ar->mem_free_blocks);

	spin_lock_bh(&ar->mem_lock);
	bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
	spin_unlock_bh(&ar->mem_lock);
}

/* Called from any context */
static void carl9170_tx_release(struct kref *ref)
{
	struct ar9170 *ar;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_tx_info *txinfo;
	struct sk_buff *skb;

	arinfo = container_of(ref, struct carl9170_tx_info, ref);
	txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
			      rate_driver_data);
	skb = container_of((void *) txinfo, struct sk_buff, cb);

	ar = arinfo->ar;
	if (WARN_ON_ONCE(!ar))
		return;

	BUILD_BUG_ON(
	    offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);

	memset(&txinfo->status.ampdu_ack_len, 0,
	       sizeof(struct ieee80211_tx_info) -
	       offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));

	if (atomic_read(&ar->tx_total_queued))
		ar->tx_schedule = true;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
		if (!atomic_read(&ar->tx_ampdu_upload))
			ar->tx_ampdu_schedule = true;

		if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
			struct _carl9170_tx_superframe *super;

			super = (void *)skb->data;
			txinfo->status.ampdu_len = super->s.rix;
			txinfo->status.ampdu_ack_len = super->s.cnt;
		} else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
			/*
			 * drop redundant tx_status reports:
			 *
			 * 1. ampdu_ack_len of the final tx_status does
			 *    include the feedback of this particular frame.
			 *
			 * 2. tx_status_irqsafe only queues up to 128
			 *    tx feedback reports and discards the rest.
			 *
			 * 3. minstrel_ht is picky, it only accepts
			 *    reports of frames with the TX_STATUS_AMPDU flag.
			 */

			dev_kfree_skb_any(skb);
			return;
		} else {
			/*
			 * Frame has failed, but we want to keep it in
			 * case it was lost due to a power-state
			 * transition.
			 */
		}
	}

	skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
	ieee80211_tx_status_irqsafe(ar->hw, skb);
}

void carl9170_tx_get_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;
	kref_get(&arinfo->ref);
}

int carl9170_tx_put_skb(struct sk_buff *skb)
{
	struct carl9170_tx_info *arinfo = (void *)
		(IEEE80211_SKB_CB(skb))->rate_driver_data;

	return kref_put(&arinfo->ref, carl9170_tx_release);
}

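/*
 * Update the block-ack window after a tx status report: clear the
 * completed MPDU's bit and advance bsn (plus the bitmap) up to the
 * first MPDU that is still outstanding, or up to snx if none are.
 */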
/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
	struct carl9170_sta_tid *tid_info, u16 seq)
{
	u16 off;

	off = SEQ_DIFF(seq, tid_info->bsn);

	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	/*
	 * Sanity check. For each MPDU we set the bit in bitmap and
	 * clear it once we received the tx_status.
	 * But if the bit is already cleared then we've been bitten
	 * by a bug.
	 */
	WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

	off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
	if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
		return;

	if (!bitmap_empty(tid_info->bitmap, off))
		off = find_first_bit(tid_info->bitmap, off);

	tid_info->bsn += off;
	tid_info->bsn &= 0x0fff;

	bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
			   off, CARL9170_BAW_BITS);
}

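/*
 * A-MPDU tx status handling: update the per-TID block-ack window and
 * accumulate ampdu_len/ampdu_ack_len statistics. The totals are handed
 * to mac80211 (via the superframe's rix/cnt fields) together with the
 * status of the frame that carried the immediate BlockAck request.
 */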
static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
	struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct ieee80211_hdr *hdr = (void *) super->frame_data;
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *tid_info;
	u8 tid;

	if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
	    txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
	    (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
		return;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (unlikely(!sta))
		goto out_rcu;

	tid = get_tid_h(hdr);

	sta_info = (void *) sta->drv_priv;
	tid_info = rcu_dereference(sta_info->agg[tid]);
	if (!tid_info)
		goto out_rcu;

	spin_lock_bh(&tid_info->lock);
	if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
		carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));

	if (sta_info->stats[tid].clear) {
		sta_info->stats[tid].clear = false;
		sta_info->stats[tid].req = false;
		sta_info->stats[tid].ampdu_len = 0;
		sta_info->stats[tid].ampdu_ack_len = 0;
	}

	sta_info->stats[tid].ampdu_len++;
	if (txinfo->status.rates[0].count == 1)
		sta_info->stats[tid].ampdu_ack_len++;

	if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
		sta_info->stats[tid].req = true;

	if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
		super->s.rix = sta_info->stats[tid].ampdu_len;
		super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
		txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
		if (sta_info->stats[tid].req)
			txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		sta_info->stats[tid].clear = true;
	}
	spin_unlock_bh(&tid_info->lock);

out_rcu:
	rcu_read_unlock();
}

void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
			const bool success)
{
	struct ieee80211_tx_info *txinfo;

	carl9170_tx_accounting_free(ar, skb);

	txinfo = IEEE80211_SKB_CB(skb);

	if (success)
		txinfo->flags |= IEEE80211_TX_STAT_ACK;
	else
		ar->tx_ack_failures++;

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		carl9170_tx_status_process_ampdu(ar, skb, txinfo);

	carl9170_tx_ps_unblock(ar, skb);
	carl9170_tx_put_skb(skb);
}

/* This function may be called from any context */
void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);

	atomic_dec(&ar->tx_total_pending);

	if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
		atomic_dec(&ar->tx_ampdu_upload);

	if (carl9170_tx_put_skb(skb))
		tasklet_hi_schedule(&ar->usb_tasklet);
}

static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
					       struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	spin_lock_bh(&queue->lock);
	skb_queue_walk(queue, skb) {
		struct _carl9170_tx_superframe *txc = (void *) skb->data;

		if (txc->s.cookie != cookie)
			continue;

		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		carl9170_release_dev_space(ar, skb);
		return skb;
	}
	spin_unlock_bh(&queue->lock);

	return NULL;
}

static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
	unsigned int tries, struct ieee80211_tx_info *txinfo)
{
	unsigned int i;

	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
		if (txinfo->status.rates[i].idx < 0)
			break;

		if (i == rix) {
			txinfo->status.rates[i].count = tries;
			i++;
			break;
		}
	}

	for (; i < IEEE80211_TX_MAX_RATES; i++) {
		txinfo->status.rates[i].idx = -1;
		txinfo->status.rates[i].count = 0;
	}
}

static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	bool restart = false;

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);

		skb = skb_peek(&ar->tx_status[i]);

		if (!skb)
			goto next;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *) txinfo->rate_driver_data;

		if (time_is_before_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)) == true)
			restart = true;

next:
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	if (restart) {
		/*
		 * At least one queue has been stuck for long enough.
		 * Give the device a kick and hope it gets back to
		 * work.
		 *
		 * possible reasons may include:
		 * - frames got lost/corrupted (bad connection to the device)
		 * - stalled rx processing/usb controller hiccups
		 * - firmware errors/bugs
		 * - every bug you can think of.
		 * - all bugs you can't...
		 * - ...
		 */
		carl9170_restart(ar, CARL9170_RR_STUCK_TX);
	}
}

static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
{
	struct carl9170_sta_tid *iter;
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	struct carl9170_tx_info *arinfo;
	struct ieee80211_sta *sta;

	rcu_read_lock();
	list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
		if (iter->state < CARL9170_TID_STATE_IDLE)
			continue;

		spin_lock_bh(&iter->lock);
		skb = skb_peek(&iter->queue);
		if (!skb)
			goto unlock;

		txinfo = IEEE80211_SKB_CB(skb);
		arinfo = (void *)txinfo->rate_driver_data;
		if (time_is_after_jiffies(arinfo->timeout +
		    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
			goto unlock;

		sta = __carl9170_get_tx_sta(ar, skb);
		if (WARN_ON(!sta))
			goto unlock;

		ieee80211_stop_tx_ba_session(sta, iter->tid);
unlock:
		spin_unlock_bh(&iter->lock);
	}
	rcu_read_unlock();
}

void carl9170_tx_janitor(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 tx_janitor.work);
	if (!IS_STARTED(ar))
		return;

	ar->tx_janitor_last_run = jiffies;

	carl9170_check_queue_stop_timeout(ar);
	carl9170_tx_ampdu_timeout(ar);

	if (!atomic_read(&ar->tx_total_queued))
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

static void __carl9170_tx_process_status(struct ar9170 *ar,
	const uint8_t cookie, const uint8_t info)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *txinfo;
	unsigned int r, t, q;
	bool success = true;

	q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];

	skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
	if (!skb) {
		/*
		 * We have lost the race to another thread.
		 */

		return;
	}

	txinfo = IEEE80211_SKB_CB(skb);

	if (!(info & CARL9170_TX_STATUS_SUCCESS))
		success = false;

	r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
	t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;

	carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
	carl9170_tx_status(ar, skb, success);
}

void carl9170_tx_process_status(struct ar9170 *ar,
				const struct carl9170_rsp *cmd)
{
	unsigned int i;

	for (i = 0; i < cmd->hdr.ext; i++) {
		if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
			print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
					     (void *) cmd, cmd->hdr.len + 4);
			break;
		}

		__carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
					     cmd->_tx_status[i].info);
	}
}

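/*
 * Derive the PHY rate index, the transmit power (taken from the EEPROM
 * power tables for the current band and bandwidth) and the chain mask
 * for a given mac80211 tx rate.
 */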
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
	unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
{
	struct ieee80211_rate *rate = NULL;
	u8 *txpower;
	unsigned int idx;

	idx = txrate->idx;
	*tpc = 0;
	*phyrate = 0;

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			/* +1 dBm for HT40 */
			*tpc += 2;

			if (info->band == IEEE80211_BAND_2GHZ)
				txpower = ar->power_2G_ht40;
			else
				txpower = ar->power_5G_ht40;
		} else {
			if (info->band == IEEE80211_BAND_2GHZ)
				txpower = ar->power_2G_ht20;
			else
				txpower = ar->power_5G_ht20;
		}

		*phyrate = txrate->idx;
		*tpc += txpower[idx & 7];
	} else {
		if (info->band == IEEE80211_BAND_2GHZ) {
			if (idx < 4)
				txpower = ar->power_2G_cck;
			else
				txpower = ar->power_2G_ofdm;
		} else {
			txpower = ar->power_5G_leg;
			idx += 4;
		}

		rate = &__carl9170_ratetable[idx];
		*tpc += txpower[(rate->hw_value & 0x30) >> 4];
		*phyrate = rate->hw_value & 0xf;
	}

	if (ar->eeprom.tx_mask == 1) {
		*chains = AR9170_TX_PHY_TXCHAIN_1;
	} else {
		if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
		    rate && rate->bitrate >= 360)
			*chains = AR9170_TX_PHY_TXCHAIN_1;
		else
			*chains = AR9170_TX_PHY_TXCHAIN_2;
	}
}

static __le32 carl9170_tx_physet(struct ar9170 *ar,
	struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
	unsigned int power = 0, chains = 0, phyrate = 0;
	__le32 tmp;

	tmp = cpu_to_le32(0);

	if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
			AR9170_TX_PHY_BW_S);
	/* this works because 40 MHz is 2 and dup is 3 */
	if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
		tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
			AR9170_TX_PHY_BW_S);

	if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
		tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

	if (txrate->flags & IEEE80211_TX_RC_MCS) {
		SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);

		/* heavy clip control */
		tmp |= cpu_to_le32((txrate->idx & 0x7) <<
			AR9170_TX_PHY_TX_HEAVY_CLIP_S);

		tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

		/*
		 * green field preamble does not work.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
		 */
	} else {
		if (info->band == IEEE80211_BAND_2GHZ) {
			if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
			else
				tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		} else {
			tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
		}

		/*
		 * short preamble seems to be broken too.
		 *
		 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
		 *	tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
		 */
	}
	carl9170_tx_rate_tpc_chains(ar, info, txrate,
				    &phyrate, &power, &chains);

	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
	tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
	return tmp;
}

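/*
 * ERP protection checks. Note: the missing breaks between the case
 * labels are deliberate, each erp_mode falls through to the weaker
 * checks below it.
 */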
static bool carl9170_tx_rts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate,
				  bool ampdu, bool multi)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
		if (ampdu)
			break;

	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
			break;

	case CARL9170_ERP_RTS:
		if (likely(!multi))
			return true;

	default:
		break;
	}

	return false;
}

static bool carl9170_tx_cts_check(struct ar9170 *ar,
				  struct ieee80211_tx_rate *rate)
{
	switch (ar->erp_mode) {
	case CARL9170_ERP_AUTO:
	case CARL9170_ERP_MAC80211:
		if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
			break;

	case CARL9170_ERP_CTS:
		return true;

	default:
		break;
	}

	return false;
}

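/*
 * Build the firmware tx descriptor: push a _carl9170_tx_superframe in
 * front of the 802.11 header and fill in the queue/vif mapping, the
 * MAC and PHY control words, the crypto mode, the A-MPDU parameters
 * and the fallback rate set.
 */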
static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct _carl9170_tx_superframe *txc;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *info;
	struct ieee80211_tx_rate *txrate;
	struct ieee80211_sta *sta;
	struct carl9170_tx_info *arinfo;
	unsigned int hw_queue;
	int i;
	__le16 mac_tmp;
	u16 len;
	bool ampdu, no_ack;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
	BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
		     CARL9170_TX_SUPERDESC_LEN);

	BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
		     AR9170_TX_HWDESC_LEN);

	BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);

	BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
		((CARL9170_TX_SUPER_MISC_VIF_ID >>
		 CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));

	hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];

	hdr = (void *)skb->data;
	info = IEEE80211_SKB_CB(skb);
	len = skb->len;

	/*
	 * Note: If the frame was sent through a monitor interface,
	 * the ieee80211_vif pointer can be NULL.
	 */
	if (likely(info->control.vif))
		cvif = (void *) info->control.vif->drv_priv;
	else
		cvif = NULL;

	sta = info->control.sta;

	txc = (void *)skb_push(skb, sizeof(*txc));
	memset(txc, 0, sizeof(*txc));

	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);

	if (likely(cvif))
		SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);

	if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;

	if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
		txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;

	mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
			      AR9170_TX_MAC_BACKOFF);
	mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
			       AR9170_TX_MAC_QOS);

	no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
	if (unlikely(no_ack))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

	if (info->control.hw_key) {
		len += info->control.hw_key->icv_len;

		switch (info->control.hw_key->cipher) {
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
		case WLAN_CIPHER_SUITE_TKIP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
			break;
		default:
			WARN_ON(1);
			goto err_out;
		}
	}

	ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
	if (ampdu) {
		unsigned int density, factor;

		if (unlikely(!sta || !cvif))
			goto err_out;

		factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
		density = sta->ht_cap.ampdu_density;

		if (density) {
			/*
			 * Watch out!
			 *
			 * Otus uses slightly different density values than
			 * those from the 802.11n spec.
			 */

			density = max_t(unsigned int, density + 1, 7u);
		}

		SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
			txc->s.ampdu_settings, density);

		SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
			txc->s.ampdu_settings, factor);

		for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
			txrate = &info->control.rates[i];
			if (txrate->idx >= 0) {
				txc->s.ri[i] =
					CARL9170_TX_SUPER_RI_AMPDU;

				if (WARN_ON(!(txrate->flags &
					      IEEE80211_TX_RC_MCS))) {
					/*
					 * Not sure if it's even possible
					 * to aggregate non-ht rates with
					 * this HW.
					 */
					goto err_out;
				}
				continue;
			}

			txrate->idx = 0;
			txrate->count = ar->hw->max_rate_tries;
		}

		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
	}

	/*
	 * NOTE: For the first rate, the ERP & AMPDU flags are directly
	 * taken from mac_control. For all fallback rates, the firmware
	 * updates the mac_control flags from the rate info field.
	 */
	for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
		txrate = &info->control.rates[i];
		if (txrate->idx < 0)
			break;

		SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
			txrate->count);

		if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);
		else if (carl9170_tx_cts_check(ar, txrate))
			txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
				CARL9170_TX_SUPER_RI_ERP_PROT_S);

		txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
	}

	txrate = &info->control.rates[0];
	SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);

	if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
	else if (carl9170_tx_cts_check(ar, txrate))
		mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);

	txc->s.len = cpu_to_le16(skb->len);
	txc->f.length = cpu_to_le16(len + FCS_LEN);
	txc->f.mac_control = mac_tmp;
	txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);

	arinfo = (void *)info->rate_driver_data;
	arinfo->timeout = jiffies;
	arinfo->ar = ar;
	kref_init(&arinfo->ref);
	return 0;

err_out:
	skb_pull(skb, sizeof(*txc));
	return -EINVAL;
}

static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;

	super = (void *) skb->data;
	super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
}

static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	int tmp;

	super = (void *) skb->data;

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
		CARL9170_TX_SUPER_AMPDU_DENSITY_S;

	/*
	 * If you haven't noticed, carl9170_tx_prepare has already filled
	 * in all ampdu spacing & factor parameters.
	 * Now it's time to check whether the settings have to be
	 * updated by the firmware, or if everything is still the same.
	 *
	 * There's no sane way to handle different density values with
	 * this hardware, so we may as well just do the compare in the
	 * driver.
	 */

	if (tmp != ar->current_density) {
		ar->current_density = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
	}

	tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
		CARL9170_TX_SUPER_AMPDU_FACTOR_S;

	if (tmp != ar->current_factor) {
		ar->current_factor = tmp;
		super->s.ampdu_settings |=
			CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
	}
}

static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
				   struct sk_buff *_src)
{
	struct _carl9170_tx_superframe *dest, *src;

	dest = (void *) _dest->data;
	src = (void *) _src->data;

	/*
	 * The mac80211 rate control algorithm expects that all MPDUs in
	 * an AMPDU share the same tx vectors.
	 * This is not really obvious right now, because the hardware
	 * does the AMPDU setup according to its own rulebook.
	 * Our nicely assembled, strictly monotonic increasing mpdu
	 * chains will be broken up, mashed back together...
	 */

	return (dest->f.phy_control == src->f.phy_control);
}

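/*
 * A-MPDU scheduler: walk the active TIDs round-robin (starting at
 * tx_ampdu_iter) and move runs of consecutive, same-rate frames from
 * the per-TID queues over to the hardware's tx_pending queues.
 */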
static void carl9170_tx_ampdu(struct ar9170 *ar)
{
	struct sk_buff_head agg;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb, *first;
	unsigned int i = 0, done_ampdus = 0;
	u16 seq, queue, tmpssn;

	atomic_inc(&ar->tx_ampdu_scheduler);
	ar->tx_ampdu_schedule = false;

	if (atomic_read(&ar->tx_ampdu_upload))
		return;

	if (!ar->tx_ampdu_list_len)
		return;

	__skb_queue_head_init(&agg);

	rcu_read_lock();
	tid_info = rcu_dereference(ar->tx_ampdu_iter);
	if (WARN_ON_ONCE(!tid_info)) {
		rcu_read_unlock();
		return;
	}

retry:
	list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
		i++;

		if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
			continue;

		queue = TID_TO_WME_AC(tid_info->tid);

		spin_lock_bh(&tid_info->lock);
		if (tid_info->state != CARL9170_TID_STATE_XMIT)
			goto processed;

		tid_info->counter++;
		first = skb_peek(&tid_info->queue);
		tmpssn = carl9170_get_seq(first);
		seq = tid_info->snx;

		if (unlikely(tmpssn != seq)) {
			tid_info->state = CARL9170_TID_STATE_IDLE;

			goto processed;
		}

		while ((skb = skb_peek(&tid_info->queue))) {
			/* strict 0, 1, ..., n - 1, n frame sequence order */
			if (unlikely(carl9170_get_seq(skb) != seq))
				break;

			/* don't upload more than AMPDU FACTOR allows. */
			if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
			    (tid_info->max - 1)))
				break;

			if (!carl9170_tx_rate_check(ar, skb, first))
				break;

			atomic_inc(&ar->tx_ampdu_upload);
			tid_info->snx = seq = SEQ_NEXT(seq);
			__skb_unlink(skb, &tid_info->queue);

			__skb_queue_tail(&agg, skb);

			if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
				break;
		}

		if (skb_queue_empty(&tid_info->queue) ||
		    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
		    tid_info->snx) {
			/*
			 * stop TID, if A-MPDU frames are still missing,
			 * or whenever the queue is empty.
			 */

			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		done_ampdus++;

processed:
		spin_unlock_bh(&tid_info->lock);

		if (skb_queue_empty(&agg))
			continue;

		/* apply ampdu spacing & factor settings */
		carl9170_set_ampdu_params(ar, skb_peek(&agg));

		/* set aggregation push bit */
		carl9170_set_immba(ar, skb_peek_tail(&agg));

		spin_lock_bh(&ar->tx_pending[queue].lock);
		skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
		spin_unlock_bh(&ar->tx_pending[queue].lock);
		ar->tx_schedule = true;
	}
	if ((done_ampdus++ == 0) && (i++ == 0))
		goto retry;

	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();
}

static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
					    struct sk_buff_head *queue)
{
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	struct carl9170_tx_info *arinfo;

	BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

	spin_lock_bh(&queue->lock);
	skb = skb_peek(queue);
	if (unlikely(!skb))
		goto err_unlock;

	if (carl9170_alloc_dev_space(ar, skb))
		goto err_unlock;

	__skb_unlink(skb, queue);
	spin_unlock_bh(&queue->lock);

	info = IEEE80211_SKB_CB(skb);
	arinfo = (void *) info->rate_driver_data;

	arinfo->timeout = jiffies;
	return skb;

err_unlock:
	spin_unlock_bh(&queue->lock);
	return NULL;
}

void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super;
	uint8_t q = 0;

	ar->tx_dropped++;

	super = (void *)skb->data;
	SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
		ar9170_qmap[carl9170_get_queue(ar, skb)]);
	__carl9170_tx_process_status(ar, super->s.cookie, q);
}

static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
	struct ieee80211_sta *sta;
	struct carl9170_sta_info *sta_info;

	rcu_read_lock();
	sta = __carl9170_get_tx_sta(ar, skb);
	if (!sta)
		goto out_rcu;

	sta_info = (void *) sta->drv_priv;
	if (unlikely(sta_info->sleeping)) {
		struct ieee80211_tx_info *tx_info;

		rcu_read_unlock();

		tx_info = IEEE80211_SKB_CB(skb);
		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
			atomic_dec(&ar->tx_ampdu_upload);

		tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
		carl9170_tx_status(ar, skb, false);
		return true;
	}

out_rcu:
	rcu_read_unlock();
	return false;
}

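/*
 * Main tx worker: drain the tx_pending queues, reserve device memory
 * for each frame, park it on the matching tx_status queue and hand it
 * to the USB layer. The janitor is armed to catch frames the firmware
 * never reports back.
 */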
static void carl9170_tx(struct ar9170 *ar)
{
	struct sk_buff *skb;
	unsigned int i, q;
	bool schedule_garbagecollector = false;

	ar->tx_schedule = false;

	if (unlikely(!IS_STARTED(ar)))
		return;

	carl9170_usb_handle_tx_err(ar);

	for (i = 0; i < ar->hw->queues; i++) {
		while (!skb_queue_empty(&ar->tx_pending[i])) {
			skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
			if (unlikely(!skb))
				break;

			if (unlikely(carl9170_tx_ps_drop(ar, skb)))
				continue;

			atomic_inc(&ar->tx_total_pending);

			q = __carl9170_get_queue(ar, i);
			/*
			 * NB: tx_status[i] vs. tx_status[q],
			 * TODO: Move into pick_skb or alloc_dev_space.
			 */
			skb_queue_tail(&ar->tx_status[q], skb);

			/*
			 * increase ref count to "2".
			 * Ref counting is the easiest way to solve the
			 * race between the urb's completion routine:
			 *	carl9170_tx_callback
			 * and wlan tx status functions:
			 *	carl9170_tx_status/janitor.
			 */
			carl9170_tx_get_skb(skb);

			carl9170_usb_tx(ar, skb);
			schedule_garbagecollector = true;
		}
	}

	if (!schedule_garbagecollector)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
				     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

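/*
 * Queue an MPDU for aggregation: frames outside the block-ack window
 * are rejected, everything else is inserted into the per-TID queue in
 * sequence order. Returns true if the TID just became ready, i.e. the
 * A-MPDU scheduler should run.
 */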
static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
	struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct _carl9170_tx_superframe *super = (void *) skb->data;
	struct carl9170_sta_info *sta_info;
	struct carl9170_sta_tid *agg;
	struct sk_buff *iter;
	u16 tid, seq, qseq, off;
	bool run = false;

	tid = carl9170_get_tid(skb);
	seq = carl9170_get_seq(skb);
	sta_info = (void *) sta->drv_priv;

	rcu_read_lock();
	agg = rcu_dereference(sta_info->agg[tid]);

	if (!agg)
		goto err_unlock_rcu;

	spin_lock_bh(&agg->lock);
	if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
		goto err_unlock;

	/* check if sequence is within the BA window */
	if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
		goto err_unlock;

	off = SEQ_DIFF(seq, agg->bsn);
	if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
		goto err_unlock;

	if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
		__skb_queue_tail(&agg->queue, skb);
		agg->hsn = seq;
		goto queued;
	}

	skb_queue_reverse_walk(&agg->queue, iter) {
		qseq = carl9170_get_seq(iter);

		if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
			__skb_queue_after(&agg->queue, iter, skb);
			goto queued;
		}
	}

	__skb_queue_head(&agg->queue, skb);
queued:

	if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
		if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
			agg->state = CARL9170_TID_STATE_XMIT;
			run = true;
		}
	}

	spin_unlock_bh(&agg->lock);
	rcu_read_unlock();

	return run;

err_unlock:
	spin_unlock_bh(&agg->lock);

err_unlock_rcu:
	rcu_read_unlock();
	super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
	carl9170_tx_status(ar, skb, false);
	ar->tx_dropped++;
	return false;
}

void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_tx_info *info;
	struct ieee80211_sta *sta;
	bool run;

	if (unlikely(!IS_STARTED(ar)))
		goto err_free;

	info = IEEE80211_SKB_CB(skb);
	sta = info->control.sta;

	if (unlikely(carl9170_tx_prepare(ar, skb)))
		goto err_free;

	carl9170_tx_accounting(ar, skb);
	/*
	 * from now on, one has to use carl9170_tx_status to free
	 * all resources which are associated with the frame.
	 */

	if (sta) {
		struct carl9170_sta_info *stai = (void *) sta->drv_priv;
		atomic_inc(&stai->pending_frames);
	}

	if (info->flags & IEEE80211_TX_CTL_AMPDU) {
		run = carl9170_tx_ampdu_queue(ar, sta, skb);
		if (run)
			carl9170_tx_ampdu(ar);

	} else {
		unsigned int queue = skb_get_queue_mapping(skb);

		skb_queue_tail(&ar->tx_pending[queue], skb);
	}

	carl9170_tx(ar);
	return;

err_free:
	ar->tx_dropped++;
	dev_kfree_skb_any(skb);
}

void carl9170_tx_scheduler(struct ar9170 *ar)
{
	if (ar->tx_ampdu_schedule)
		carl9170_tx_ampdu(ar);

	if (ar->tx_schedule)
		carl9170_tx(ar);
}

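/*
 * Rebuild the beacon for the next vif in the round-robin rotation and
 * upload the changed words into the device's beacon memory via async
 * register writes; with "submit" set, the beacon is also armed through
 * carl9170_bcn_ctrl.
 */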
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
{
	struct sk_buff *skb = NULL;
	struct carl9170_vif_info *cvif;
	struct ieee80211_tx_info *txinfo;
	struct ieee80211_tx_rate *rate;
	__le32 *data, *old = NULL;
	unsigned int plcp, power, chains;
	u32 word, ht1, off, addr, len;
	int i = 0, err = 0;

	rcu_read_lock();
	cvif = rcu_dereference(ar->beacon_iter);
retry:
	if (ar->vifs == 0 || !cvif)
		goto out_unlock;

	list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
		if (cvif->active && cvif->enable_beacon)
			goto found;
	}

	if (!ar->beacon_enabled || i++)
		goto out_unlock;

	goto retry;

found:
	rcu_assign_pointer(ar->beacon_iter, cvif);

	skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
				       NULL, NULL);

	if (!skb) {
		err = -ENOMEM;
		goto err_free;
	}

	txinfo = IEEE80211_SKB_CB(skb);
	spin_lock_bh(&ar->beacon_lock);
	data = (__le32 *)skb->data;
	if (cvif->beacon)
		old = (__le32 *)cvif->beacon->data;

	off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
	addr = ar->fw.beacon_addr + off;
	len = roundup(skb->len + FCS_LEN, 4);

	if ((off + len) > ar->fw.beacon_max_len) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "beacon does not "
				  "fit into device memory!\n");
		}
		err = -EINVAL;
		goto err_unlock;
	}

	if (len > AR9170_MAC_BCN_LENGTH_MAX) {
		if (net_ratelimit()) {
			wiphy_err(ar->hw->wiphy, "no support for beacons "
				  "bigger than %d (yours:%d).\n",
				  AR9170_MAC_BCN_LENGTH_MAX, len);
		}

		err = -EMSGSIZE;
		goto err_unlock;
	}

	ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
	rate = &txinfo->control.rates[0];
	carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
	if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
		if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
			plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
		else
			plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
	} else {
		ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			plcp |= AR9170_MAC_BCN_HT2_SGI;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
			ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
			plcp |= AR9170_MAC_BCN_HT2_BW40;
		}
		if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
			ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
			plcp |= AR9170_MAC_BCN_HT2_BW40;
		}

		SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
	}

	SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
	SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
	SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
	if (chains == AR9170_TX_PHY_TXCHAIN_2)
		ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;

	carl9170_async_regwrite_begin(ar);
	carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
	if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
	else
		carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);

	for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
		/*
		 * XXX: This accesses beyond skb data for up
		 * to the last 3 bytes!!
		 */

		if (old && (data[i] == old[i]))
			continue;

		word = le32_to_cpu(data[i]);
		carl9170_async_regwrite(addr + 4 * i, word);
	}
	carl9170_async_regwrite_finish();

	dev_kfree_skb_any(cvif->beacon);
	cvif->beacon = NULL;

	err = carl9170_async_regwrite_result();
	if (!err)
		cvif->beacon = skb;
	spin_unlock_bh(&ar->beacon_lock);
	if (err)
		goto err_free;

	if (submit) {
		err = carl9170_bcn_ctrl(ar, cvif->id,
					CARL9170_BCN_CTRL_CAB_TRIGGER,
					addr, skb->len + FCS_LEN);

		if (err)
			goto err_free;
	}

out_unlock:
	rcu_read_unlock();
	return 0;

err_unlock:
	spin_unlock_bh(&ar->beacon_lock);

err_free:
	rcu_read_unlock();
	dev_kfree_skb_any(skb);
	return err;
}