[linux-2.6/next.git] drivers/net/wireless/ath/ath9k/virtual.c
blob 2dc7095e56d189432eb80aa5f82134a892a869d0
/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>

#include "ath9k.h"

struct ath9k_vif_iter_data {
	const u8 *hw_macaddr;
	u8 mask[ETH_ALEN];
};

static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
}
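
/*
 * Recompute the BSSID mask from the MAC addresses of all active interfaces
 * on the primary and all secondary wiphys, and program it into the hardware.
 */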
void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ath_softc *sc = aphy->sc;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath9k_vif_iter_data iter_data;
	int i;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);

	if (vif)
		ath9k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	spin_lock_bh(&sc->wiphy_lock);
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath9k_vif_iter,
						   &iter_data);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			continue;
		ieee80211_iterate_active_interfaces_atomic(
			sc->sec_wiphy[i]->hw, ath9k_vif_iter, &iter_data);
	}
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);
}
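
/*
 * Allocate and register a new secondary virtual wiphy. The sec_wiphy array
 * is grown if no free slot is available, and the new wiphy gets a locally
 * administered MAC address derived from the device address and slot index.
 */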
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath9k_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}
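
/*
 * Unregister and free a secondary virtual wiphy and release its slot in the
 * sec_wiphy array. Returns -ENOENT if the wiphy is not a known secondary.
 */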
int ath9k_wiphy_del(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	int i;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (aphy == sc->sec_wiphy[i]) {
			sc->sec_wiphy[i] = NULL;
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_unregister_hw(aphy->hw);
			ieee80211_free_hw(aphy->hw);
			return 0;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return -ENOENT;
}
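
/*
 * Build and transmit a nullfunc data frame to the given BSSID, with the PM
 * bit set when ps is non-zero. TX status is requested so that the pause
 * state machine can be advanced from ath9k_tx_status().
 */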
static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
			       struct ieee80211_vif *vif, const u8 *bssid,
			       int ps)
{
	struct ath_softc *sc = aphy->sc;
	struct ath_tx_control txctl;
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_tx_info *info;

	skb = dev_alloc_skb(24);
	if (skb == NULL)
		return -ENOMEM;
	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
			 IEEE80211_FCTL_TODS);
	if (ps)
		fc |= cpu_to_le16(IEEE80211_FCTL_PM);
	hdr->frame_control = fc;
	memcpy(hdr->addr1, bssid, ETH_ALEN);
	memcpy(hdr->addr2, aphy->hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, bssid, ETH_ALEN);

	info = IEEE80211_SKB_CB(skb);
	memset(info, 0, sizeof(*info));
	info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS;
	info->control.vif = vif;
	info->control.rates[0].idx = 0;
	info->control.rates[0].count = 4;
	info->control.rates[1].idx = -1;

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = sc->tx.txq_map[WME_AC_VO];
	txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;

	if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
		goto exit;

	return 0;
exit:
	dev_kfree_skb_any(skb);
	return -1;
}
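
/*
 * Helpers to check whether any wiphy (primary or secondary) is still in the
 * PAUSING state; the __ variant must be called with wiphy_lock held.
 */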
static bool __ath9k_wiphy_pausing(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_PAUSING)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_PAUSING)
			return true;
	}
	return false;
}

static bool ath9k_wiphy_pausing(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_pausing(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static bool __ath9k_wiphy_scanning(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_SCAN)
		return true;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_SCAN)
			return true;
	}
	return false;
}

bool ath9k_wiphy_scanning(struct ath_softc *sc)
{
	bool ret;
	spin_lock_bh(&sc->wiphy_lock);
	ret = __ath9k_wiphy_scanning(sc);
	spin_unlock_bh(&sc->wiphy_lock);
	return ret;
}

static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy);

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_unpause_ch(struct ath_wiphy *aphy)
{
	if (aphy == NULL)
		return;
	if (aphy->chan_idx != aphy->sc->chan_idx)
		return; /* wiphy not on the selected channel */
	__ath9k_wiphy_unpause(aphy);
}

static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	__ath9k_wiphy_unpause_ch(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++)
		__ath9k_wiphy_unpause_ch(sc->sec_wiphy[i]);
	spin_unlock_bh(&sc->wiphy_lock);
}
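
/*
 * Worker that performs the channel change for sc->next_wiphy once all
 * wiphys have been paused, then unpauses the wiphys that operate on the
 * newly selected channel.
 */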
void ath9k_wiphy_chan_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_wiphy *aphy = sc->next_wiphy;

	if (aphy == NULL)
		return;

	/*
	 * All pending interfaces paused; ready to change
	 * channels.
	 */

	/* Change channels */
	mutex_lock(&sc->mutex);
	/* XXX: remove me eventually */
	ath9k_update_ichannel(sc, aphy->hw,
			      &sc->sc_ah->channels[sc->chan_idx]);

	/* sync hw configuration for hw code */
	common->hw = aphy->hw;

	if (ath_set_channel(sc, aphy->hw,
			    &sc->sc_ah->channels[sc->chan_idx]) < 0) {
		printk(KERN_DEBUG "ath9k: Failed to set channel for new "
		       "virtual wiphy\n");
		mutex_unlock(&sc->mutex);
		return;
	}
	mutex_unlock(&sc->mutex);

	ath9k_wiphy_unpause_channel(sc);
}

/*
 * ath9k version of ieee80211_tx_status() for TX frames that are generated
 * internally in the driver.
 */
void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
{
	struct ath_wiphy *aphy = hw->priv;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

	if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
		if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
			printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
			       "frame\n", wiphy_name(hw->wiphy));
			/*
			 * The AP did not reply; ignore this to allow us to
			 * continue.
			 */
		}
		aphy->state = ATH_WIPHY_PAUSED;
		if (!ath9k_wiphy_pausing(aphy->sc)) {
			/*
			 * Drop from tasklet to work to allow mutex for channel
			 * change.
			 */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
		}
	}

	dev_kfree_skb(skb);
}

static void ath9k_mark_paused(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	aphy->state = ATH_WIPHY_PAUSED;
	if (!__ath9k_wiphy_pausing(sc))
		ieee80211_queue_work(sc->hw, &sc->chan_work);
}
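
/*
 * Per-interface iterator used when pausing a wiphy: an associated station
 * interface sends a PS nullfunc frame and is marked paused from its TX
 * status; anything else is marked paused immediately.
 */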
static void ath9k_pause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc) {
			ath9k_mark_paused(aphy);
			break;
		}
		/* TODO: could avoid this if already in PS mode */
		if (ath9k_send_nullfunc(aphy, vif, avp->bssid, 1)) {
			printk(KERN_DEBUG "%s: failed to send PS nullfunc\n",
			       __func__);
			ath9k_mark_paused(aphy);
		}
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is paused by aphy->state change */
		ath9k_mark_paused(aphy);
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	ieee80211_stop_queues(aphy->hw);
	aphy->state = ATH_WIPHY_PAUSING;
	/*
	 * TODO: handle PAUSING->PAUSED for the case where there are multiple
	 * active vifs (now we do it on the first vif getting ready; should be
	 * on the last)
	 */
	ieee80211_iterate_active_interfaces_atomic(aphy->hw, ath9k_pause_iter,
						   aphy);
	return 0;
}

int ath9k_wiphy_pause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_pause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void ath9k_unpause_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_wiphy *aphy = data;
	struct ath_vif *avp = (void *) vif->drv_priv;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		if (!vif->bss_conf.assoc)
			break;
		ath9k_send_nullfunc(aphy, vif, avp->bssid, 0);
		break;
	case NL80211_IFTYPE_AP:
		/* Beacon transmission is re-enabled by aphy->state change */
		break;
	default:
		break;
	}
}

/* caller must hold wiphy_lock */
static int __ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	ieee80211_iterate_active_interfaces_atomic(aphy->hw,
						   ath9k_unpause_iter, aphy);
	aphy->state = ATH_WIPHY_ACTIVE;
	ieee80211_wake_queues(aphy->hw);
	return 0;
}

int ath9k_wiphy_unpause(struct ath_wiphy *aphy)
{
	int ret;
	spin_lock_bh(&aphy->sc->wiphy_lock);
	ret = __ath9k_wiphy_unpause(aphy);
	spin_unlock_bh(&aphy->sc->wiphy_lock);
	return ret;
}

static void __ath9k_wiphy_mark_all_paused(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE)
		sc->pri_wiphy->state = ATH_WIPHY_PAUSED;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE)
			sc->sec_wiphy[i]->state = ATH_WIPHY_PAUSED;
	}
}

/* caller must hold wiphy_lock */
static void __ath9k_wiphy_pause_all(struct ath_softc *sc)
{
	int i;
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		__ath9k_wiphy_pause(sc->pri_wiphy);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			__ath9k_wiphy_pause(sc->sec_wiphy[i]);
	}
}
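
/*
 * Switch the shared channel to the one used by the given virtual wiphy.
 * All active wiphys are paused first; the channel change itself runs from
 * chan_work, either immediately or once the last wiphy has reported itself
 * paused via ath9k_tx_status().
 */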
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2)) {
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
					     &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}
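
/*
 * Return true if any wiphy (primary or secondary) has been started, i.e. is
 * no longer in the ATH_WIPHY_INACTIVE state.
 */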
bool ath9k_wiphy_started(struct ath_softc *sc)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) {
		spin_unlock_bh(&sc->wiphy_lock);
		return true;
	}
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state != ATH_WIPHY_INACTIVE) {
			spin_unlock_bh(&sc->wiphy_lock);
			return true;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);
	return false;
}

static void ath9k_wiphy_pause_chan(struct ath_wiphy *aphy,
				   struct ath_wiphy *selected)
{
	if (selected->state == ATH_WIPHY_SCAN) {
		if (aphy == selected)
			return;
		/*
		 * Pause all other wiphys for the duration of the scan even if
		 * they are on the current channel now.
		 */
	} else if (aphy->chan_idx == selected->chan_idx)
		return;
	aphy->state = ATH_WIPHY_PAUSED;
	ieee80211_stop_queues(aphy->hw);
}

void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
				  struct ath_wiphy *selected)
{
	int i;
	spin_lock_bh(&sc->wiphy_lock);
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE)
		ath9k_wiphy_pause_chan(sc->pri_wiphy, selected);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] &&
		    sc->sec_wiphy[i]->state == ATH_WIPHY_ACTIVE)
			ath9k_wiphy_pause_chan(sc->sec_wiphy[i], selected);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}
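
/*
 * Periodic wiphy scheduler: pick the next non-inactive wiphy (round-robin
 * over the secondary wiphys, falling back to the primary), request a switch
 * to it, and re-arm the delayed work.
 */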
void ath9k_wiphy_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc,
					    wiphy_work.work);
	struct ath_wiphy *aphy = NULL;
	bool first = true;

	spin_lock_bh(&sc->wiphy_lock);

	if (sc->wiphy_scheduler_int == 0) {
		/* wiphy scheduler is disabled */
		spin_unlock_bh(&sc->wiphy_lock);
		return;
	}

try_again:
	sc->wiphy_scheduler_index++;
	while (sc->wiphy_scheduler_index <= sc->num_sec_wiphy) {
		aphy = sc->sec_wiphy[sc->wiphy_scheduler_index - 1];
		if (aphy && aphy->state != ATH_WIPHY_INACTIVE)
			break;

		sc->wiphy_scheduler_index++;
		aphy = NULL;
	}
	if (aphy == NULL) {
		sc->wiphy_scheduler_index = 0;
		if (sc->pri_wiphy->state == ATH_WIPHY_INACTIVE) {
			if (first) {
				first = false;
				goto try_again;
			}
			/* No wiphy is ready to be scheduled */
		} else
			aphy = sc->pri_wiphy;
	}

	spin_unlock_bh(&sc->wiphy_lock);

	if (aphy &&
	    aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN &&
	    ath9k_wiphy_select(aphy)) {
		printk(KERN_DEBUG "ath9k: Failed to schedule virtual wiphy "
		       "change\n");
	}

	ieee80211_queue_delayed_work(sc->hw,
				     &sc->wiphy_work,
				     sc->wiphy_scheduler_int);
}
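
/*
 * Set the wiphy scheduler interval in milliseconds; zero disables the
 * scheduler. Pending scheduler work is cancelled and, when enabled, re-armed
 * with the new interval.
 */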
void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
{
	cancel_delayed_work_sync(&sc->wiphy_work);
	sc->wiphy_scheduler_int = msecs_to_jiffies(msec_int);
	if (sc->wiphy_scheduler_int)
		ieee80211_queue_delayed_work(sc->hw, &sc->wiphy_work,
					     sc->wiphy_scheduler_int);
}

/* caller must hold wiphy_lock */
bool ath9k_all_wiphys_idle(struct ath_softc *sc)
{
	unsigned int i;
	if (!sc->pri_wiphy->idle)
		return false;
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (!aphy->idle)
			return false;
	}
	return true;
}

/* caller must hold wiphy_lock */
void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
{
	struct ath_softc *sc = aphy->sc;

	aphy->idle = idle;
	ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
		"Marking %s as %sidle\n",
		wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
}

/* Only bother starting a queue on an active virtual wiphy */
bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;
	bool txq_started = false;

	spin_lock_bh(&sc->wiphy_lock);

	/* Start the primary wiphy */
	if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		goto unlock;
	}

	/* Now start the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		if (aphy->state != ATH_WIPHY_ACTIVE)
			continue;

		hw = aphy->hw;
		ieee80211_wake_queue(hw, skb_queue);
		txq_started = true;
		break;
	}

unlock:
	spin_unlock_bh(&sc->wiphy_lock);
	return txq_started;
}

/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
{
	struct ieee80211_hw *hw = sc->pri_wiphy->hw;
	unsigned int i;

	spin_lock_bh(&sc->wiphy_lock);

	/* Stop the primary wiphy */
	ieee80211_stop_queue(hw, skb_queue);

	/* Now stop the secondary wiphy queues */
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[i];
		if (!aphy)
			continue;
		hw = aphy->hw;
		ieee80211_stop_queue(hw, skb_queue);
	}
	spin_unlock_bh(&sc->wiphy_lock);
}