/******************************************************************************
 *
 * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"

MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
				    IWL_RATE_SISO_##s##M_PLCP, \
				    IWL_RATE_MIMO2_##s##M_PLCP,\
				    IWL_RATE_MIMO3_##s##M_PLCP,\
				    IWL_RATE_##r##M_IEEE,      \
				    IWL_RATE_##ip##M_INDEX,    \
				    IWL_RATE_##in##M_INDEX,    \
				    IWL_RATE_##rp##M_INDEX,    \
				    IWL_RATE_##rn##M_INDEX,    \
				    IWL_RATE_##pp##M_INDEX,    \
				    IWL_RATE_##np##M_INDEX }
u32 iwl_debug_level;
EXPORT_SYMBOL(iwl_debug_level);
static irqreturn_t iwl_isr(int irq, void *data);
/*
 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),          /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),         /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),    /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),    /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),    /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),    /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),    /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV), /* 54mbps */
	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV), /* 60mbps */
	/* FIXME:RS:          ^^    should be INV (legacy) */
};
EXPORT_SYMBOL(iwl_rates);
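/*
 * Illustrative expansion (not part of the original source): the table entry
 * IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5) above becomes roughly
 *
 *   [IWL_RATE_2M_INDEX] = { IWL_RATE_2M_PLCP, IWL_RATE_SISO_INVM_PLCP,
 *                           IWL_RATE_MIMO2_INVM_PLCP, IWL_RATE_MIMO3_INVM_PLCP,
 *                           IWL_RATE_2M_IEEE, IWL_RATE_1M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_1M_INDEX,
 *                           IWL_RATE_5M_INDEX, IWL_RATE_1M_INDEX,
 *                           IWL_RATE_5M_INDEX },
 *
 * i.e. the PLCP codes, the IEEE rate value, and then the previous/next rate
 * indexes used for legacy and TGG rate-scaling fallback.
 */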
/**
 * translate ucode response to mac80211 tx status control values
 */
void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
			      struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->control.rates[0];

	info->antenna_sel_tx =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
	int idx = 0;

	/* HT rate format */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);

		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
			idx = idx - IWL_RATE_MIMO2_6M_PLCP;

		idx += IWL_FIRST_OFDM_RATE;
		/* skip 9M not supported in ht*/
		if (idx >= IWL_RATE_9M_INDEX)
			idx += 1;
		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
			return idx;

	/* legacy rate format, search for match in table */
	} else {
		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx;
	}

	return -1;
}
EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
{
	int i;
	u8 ind = ant;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (priv->hw_params.valid_tx_ant & BIT(ind))
			return ind;
	}
	return ant;
}
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);
/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
		struct ieee80211_ops *hw_ops)
{
	struct iwl_priv *priv;

	/* mac80211 allocates memory for this device instance, including
	 *   space for this driver's private structure */
	struct ieee80211_hw *hw =
		ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
	if (hw == NULL) {
		printk(KERN_ERR "%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_alloc_all);
void iwl_hw_detect(struct iwl_priv *priv)
{
	priv->hw_rev = _iwl_read32(priv, CSR_HW_REV);
	priv->hw_wa_rev = _iwl_read32(priv, CSR_HW_REV_WA_REG);
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &priv->rev_id);
}
EXPORT_SYMBOL(iwl_hw_detect);
int iwl_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);
	iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
	spin_unlock_irqrestore(&priv->lock, flags);

	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl_rx_queue_reset(priv, rxq);

	iwl_rx_replenish(priv);

	iwl_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate and init all Tx and Command queues */
	ret = iwl_txq_ctx_reset(priv);
	if (ret)
		return ret;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_hw_nic_init);
void iwl_activate_qos(struct iwl_priv *priv, u8 force)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	priv->qos_data.def_qos_parm.qos_flags = 0;

	if (priv->qos_data.qos_cap.q_AP.queue_request &&
	    !priv->qos_data.qos_cap.q_AP.txop_request)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_TXOP_TYPE_MSK;
	if (priv->qos_data.qos_active)
		priv->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (priv->current_ht_config.is_ht)
		priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	if (force || iwl_is_associated(priv)) {
		IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
				priv->qos_data.qos_active,
				priv->qos_data.def_qos_parm.qos_flags);

		iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
				       sizeof(struct iwl_qosparam_cmd),
				       &priv->qos_data.def_qos_parm, NULL);
	}
}
EXPORT_SYMBOL(iwl_activate_qos);
/*
 * AC      CWmin      CW max      AIFSN      TXOP Limit    TXOP Limit
 *                                           (802.11b)     (802.11a/g)
 * AC_BK    15         1023        7            0               0
 * AC_BE    15         1023        3            0               0
 * AC_VI     7           15        2         6.016ms         3.008ms
 * AC_VO     3            7        2         3.264ms         1.504ms
 */
void iwl_reset_qos(struct iwl_priv *priv)
{
	u16 cw_min = 15;
	u16 cw_max = 1023;
	u8 aifs = 2;
	bool is_legacy = false;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->lock, flags);
	/* QoS always active in AP and ADHOC mode
	 * In STA mode wait for association
	 */
	if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
	    priv->iw_mode == NL80211_IFTYPE_AP)
		priv->qos_data.qos_active = 1;
	else
		priv->qos_data.qos_active = 0;

	/* check for legacy mode */
	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
	    (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
	    (priv->iw_mode == NL80211_IFTYPE_STATION &&
	    (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
		cw_min = 31;
		is_legacy = 1;
	}

	if (priv->qos_data.qos_active)
		aifs = 3;
	else
		aifs = 2;

	priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
	priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
	priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
	priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
	priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;

	if (priv->qos_data.qos_active) {
		i = 1;
		priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
		priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
		priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		i = 2;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16(cw_min);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(6016);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3008);
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;

		i = 3;
		priv->qos_data.def_qos_parm.ac[i].cw_min =
			cpu_to_le16((cw_min + 1) / 4 - 1);
		priv->qos_data.def_qos_parm.ac[i].cw_max =
			cpu_to_le16((cw_min + 1) / 2 - 1);
		priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
		priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		if (is_legacy)
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(3264);
		else
			priv->qos_data.def_qos_parm.ac[i].edca_txop =
				cpu_to_le16(1504);
	} else {
		for (i = 1; i < 4; i++) {
			priv->qos_data.def_qos_parm.ac[i].cw_min =
				cpu_to_le16(cw_min);
			priv->qos_data.def_qos_parm.ac[i].cw_max =
				cpu_to_le16(cw_max);
			priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
			priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
			priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
		}
	}
	IWL_DEBUG_QOS(priv, "set QoS to default\n");

	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_reset_qos);
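/*
 * Worked example (illustrative, not in the original source): with the
 * defaults cw_min = 15 and cw_max = 1023 used above, the derived AC_VI
 * values are cw_min = (15 + 1) / 2 - 1 = 7 and cw_max = 15, and the AC_VO
 * values are cw_min = (15 + 1) / 4 - 1 = 3 and cw_max = (15 + 1) / 2 - 1 = 7,
 * matching the table in the comment before iwl_reset_qos().
 */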
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (priv->cfg->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
			 (WLAN_HT_CAP_SM_PS_DISABLED << 2));

	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
static void iwlcore_init_hw_rates(struct iwl_priv *priv,
			      struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = iwl_rates[i].ieee * 5;
		rates[i].hw_value = i; /* Rate scaling will work on indexes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
				(iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
					0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}
/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int iwlcore_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		if (is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			   priv->pci_dev->device,
			   priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwlcore_init_geos);
/*
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 */
void iwlcore_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwlcore_free_geos);
/*
 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
 * function.
 */
void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
			     __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
	}
}
EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return !priv->current_ht_config.is_ht ||
	       priv->current_ht_config.single_chain_sufficient;
}
static u8 iwl_is_channel_extension(struct iwl_priv *priv,
				   enum ieee80211_band band,
				   u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_get_channel_info(priv, band, channel);
	if (!is_channel_valid(ch_info))
		return 0;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return 0;
}
u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			  struct ieee80211_sta_ht_cap *sta_ht_inf)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;

	if (!ht_conf->is_ht || !ht_conf->is_40mhz)
		return 0;

	/* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (sta_ht_inf) {
		if (!sta_ht_inf->ht_supported)
			return 0;
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	if (priv->disable_ht40)
		return 0;
#endif
	return iwl_is_channel_extension(priv, priv->band,
			le16_to_cpu(priv->staging_rxon.channel),
			ht_conf->extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val = 0;
	u16 beacon_factor = 0;

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}
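/*
 * Illustrative example (not from the original source): with beacon_val = 5000
 * and max_beacon_val = 4096, beacon_factor = (5000 + 4096) / 4096 = 2 and the
 * adjusted interval becomes 5000 / 2 = 2500, i.e. an integer fraction of the
 * configured beacon interval that fits below the uCode maximum.
 */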
void iwl_setup_rxon_timing(struct iwl_priv *priv)
{
	u64 tsf;
	s32 interval_tm, rem;
	unsigned long flags;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;

	conf = ieee80211_get_hw_conf(priv->hw);

	spin_lock_irqsave(&priv->lock, flags);
	priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
	priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);

	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
		beacon_int = priv->beacon_int;
		priv->rxon_timing.atim_window = 0;
	} else {
		beacon_int = priv->vif->bss_conf.beacon_int;

		/* TODO: we need to get atim_window from upper stack
		 * for now we set to 0 */
		priv->rxon_timing.atim_window = 0;
	}

	beacon_int = iwl_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * 1024);
	priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * 1024;
	rem = do_div(tsf, interval_tm);
	priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(priv->rxon_timing.beacon_interval),
			le32_to_cpu(priv->rxon_timing.beacon_init_val),
			le16_to_cpu(priv->rxon_timing.atim_window));
}
EXPORT_SYMBOL(iwl_setup_rxon_timing);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}
EXPORT_SYMBOL(iwl_set_rxon_hwcrypto);
/**
 * iwl_check_rxon_cmd - validate RXON structure is valid
 *
 * NOTE:  This is really only useful during development and can eventually
 * be #ifdef'd out once the driver is stable and folks aren't actively
 * making changes
 */
int iwl_check_rxon_cmd(struct iwl_priv *priv)
{
	int error = 0;
	int counter = 0;
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		error |= le32_to_cpu(rxon->flags &
				(RXON_FLG_TGJ_NARROW_BAND_MSK |
				 RXON_FLG_RADAR_DETECT_MSK));
		if (error)
			IWL_WARN(priv, "check 24G fields %d | %d\n",
				    counter++, error);
	} else {
		error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ?
				0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK);
		if (error)
			IWL_WARN(priv, "check 52 fields %d | %d\n",
				    counter++, error);
		error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK);
		if (error)
			IWL_WARN(priv, "check 52 CCK %d | %d\n",
				    counter++, error);
	}
	error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1;
	if (error)
		IWL_WARN(priv, "check mac addr %d | %d\n", counter++, error);

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) &&
		  ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0));
	if (error)
		IWL_WARN(priv, "check basic rate %d | %d\n", counter++, error);

	error |= (le16_to_cpu(rxon->assoc_id) > 2007);
	if (error)
		IWL_WARN(priv, "check assoc id %d | %d\n", counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK and short slot %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK));
	if (error)
		IWL_WARN(priv, "check CCK & auto detect %d | %d\n",
			    counter++, error);

	error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK);
	if (error)
		IWL_WARN(priv, "check TGG and auto detect %d | %d\n",
			    counter++, error);

	if (error)
		IWL_WARN(priv, "Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERR(priv, "Not a valid iwl_rxon_assoc_cmd field values\n");
		return -1;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_check_rxon_cmd);
/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv)
{

	/* These items are only settable from the full RXON command */
	if (!(iwl_is_associated(priv)) ||
	    compare_ether_addr(priv->staging_rxon.bssid_addr,
			       priv->active_rxon.bssid_addr) ||
	    compare_ether_addr(priv->staging_rxon.node_addr,
			       priv->active_rxon.node_addr) ||
	    compare_ether_addr(priv->staging_rxon.wlap_bssid_addr,
			       priv->active_rxon.wlap_bssid_addr) ||
	    (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) ||
	    (priv->staging_rxon.channel != priv->active_rxon.channel) ||
	    (priv->staging_rxon.air_propagation !=
	     priv->active_rxon.air_propagation) ||
	    (priv->staging_rxon.ofdm_ht_single_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_single_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_dual_stream_basic_rates) ||
	    (priv->staging_rxon.ofdm_ht_triple_stream_basic_rates !=
	     priv->active_rxon.ofdm_ht_triple_stream_basic_rates) ||
	    (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id))
		return 1;

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) !=
	    (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK))
		return 1;

	/* Check if we are switching association toggle */
	if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) !=
	    (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK))
		return 1;

	return 0;
}
EXPORT_SYMBOL(iwl_full_rxon_required);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
{
	int i;
	int rate_mask;

	/* Set rate mask */
	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
		rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
	else
		rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;

	/* Find lowest valid rate */
	for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
					i = iwl_rates[i].next_ieee) {
		if (rate_mask & (1 << i))
			return iwl_rates[i].plcp;
	}

	/* No valid rate was found. Assign the lowest one */
	if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	if (!ht_conf->is_ht) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht_protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, NULL)) {
		/* pure ht40 */
		if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ht_conf->extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ht_conf->extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv, "invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ht_conf->ht_protection,
			ht_conf->extension_chan_offset);
}
EXPORT_SYMBOL(iwl_set_rxon_ht);
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}
/*
 * When we are in power saving, there's no difference between
 * using multiple chains or just a single chain, but due to the
 * lack of SM PS we lose a lot of throughput if we use just a
 * single chain.
 *
 * Therefore, use the active count here (which will use multiple
 * chains unless connected to a legacy AP).
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	return active_cnt;
}
/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}
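/*
 * (Note, not in the original source: this is simply a population count of
 * the low four bits, e.g. iwl_count_chain_bitmap(0x5) == 2 and
 * iwl_count_chain_bitmap(0x7) == 3; hweight8(chain_bitmap & 0xf) would
 * yield the same result.)
 */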
/**
 * iwl_is_monitor_mode - Determine if interface in monitor mode
 *
 * priv->iw_mode is set in add_interface, but add_interface is
 * never called for monitor mode. The only way mac80211 informs us about
 * monitor mode is through configuring filters (call to configure_filter).
 */
bool iwl_is_monitor_mode(struct iwl_priv *priv)
{
	return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
}
EXPORT_SYMBOL(iwl_is_monitor_mode);
/**
 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwl_set_rxon_chain(struct iwl_priv *priv)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;

	/* copied from 'iwl_bg_request_scan()' */
	/* Force use of chains B and C (0x6) for Rx for 4965
	 * Avoid A (0x1) because of its off-channel reception on A-band.
	 * MIMO is not used here, but value is required */
	if (iwl_is_monitor_mode(priv) &&
	    !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
	    ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
		rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
		rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
		rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
		rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	}

	priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		priv->staging_rxon.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		priv->staging_rxon.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			priv->staging_rxon.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
EXPORT_SYMBOL(iwl_set_rxon_chain);
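/*
 * Illustrative example (not part of the original source): with antennas A and
 * B connected (active_chains == 0x3) and active_rx_cnt == idle_rx_cnt == 2,
 * the staging rx_chain computed above is
 * (0x3 << RXON_RX_CHAIN_VALID_POS) | (2 << RXON_RX_CHAIN_MIMO_CNT_POS) |
 * (2 << RXON_RX_CHAIN_CNT_POS).
 */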
/**
 * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON
 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
 * @channel: Any channel valid for the requested phymode
 *
 * In addition to setting the staging RXON, priv->phymode is also set.
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the phymode
 */
int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ieee80211_frequency_to_channel(ch->center_freq);

	if (!iwl_get_channel_info(priv, band, channel)) {
		IWL_DEBUG_INFO(priv, "Could not set channel to %d [%d]\n",
			       channel, band);
		return -EINVAL;
	}

	if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
	    (priv->band == band))
		return 0;

	priv->staging_rxon.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

	return 0;
}
EXPORT_SYMBOL(iwl_set_rxon_channel);
void iwl_set_flags_for_band(struct iwl_priv *priv,
			    enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_5GHZ) {
		priv->staging_rxon.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from iwl_post_associate() */
		if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
		priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK;
		priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK;
	}
}
/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
{
	const struct iwl_channel_info *ch_info;

	memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));

	switch (mode) {
	case NL80211_IFTYPE_AP:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
		break;

	case NL80211_IFTYPE_STATION:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS;
		priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS;
		priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n", mode);
		break;
	}

	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;

	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(priv->active_rxon.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	/*
	 * in some case A channels are all non IBSS
	 * in this case force B/G channel
	 */
	if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
	    !(is_channel_ibss(ch_info)))
		ch_info = &priv->channel_info[0];

	priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, priv->band);

	priv->staging_rxon.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	priv->staging_rxon.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
	memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN);
	priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff;
	priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff;
	priv->staging_rxon.ofdm_ht_triple_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_connection_init_rx_config);
static void iwl_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	int i;

	hw = iwl_get_hw_mode(priv, priv->band);
	if (!hw) {
		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
		return;
	}

	priv->active_rate = 0;
	priv->active_rate_basic = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
			priv->active_rate |= (1 << rate->hw_value);
	}

	IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n",
		       priv->active_rate, priv->active_rate_basic);

	/*
	 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK)
	 * otherwise set it to the default of all CCK rates and 6, 12, 24 for
	 * OFDM
	 */
	if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
		priv->staging_rxon.cck_basic_rates =
		    ((priv->active_rate_basic &
		      IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
	else
		priv->staging_rxon.cck_basic_rates =
		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
		priv->staging_rxon.ofdm_basic_rates =
		    ((priv->active_rate_basic &
		      (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
		     IWL_FIRST_OFDM_RATE) & 0xFF;
	else
		priv->staging_rxon.ofdm_basic_rates =
		    (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
}
void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
	IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n",
		      le16_to_cpu(csa->channel), le32_to_cpu(csa->status));
	rxon->channel = csa->channel;
	priv->staging_rxon.channel = csa->channel;
}
EXPORT_SYMBOL(iwl_rx_csa);
#ifdef CONFIG_IWLWIFI_DEBUG
static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
{
	struct iwl_rxon_cmd *rxon = &priv->staging_rxon;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
#endif
/**
 * iwl_irq_handle_error - called for HW or SW error interrupt from card
 */
void iwl_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) {
		priv->cfg->ops->lib->dump_nic_error_log(priv);
		priv->cfg->ops->lib->dump_nic_event_log(priv);
		iwl_print_rx_config_cmd(priv);
	}
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_irq_handle_error);
int iwl_apm_stop_master(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	/* set stop master bit */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_INFO(priv, "stop master\n");

	return 0;
}
EXPORT_SYMBOL(iwl_apm_stop_master);
void iwl_apm_stop(struct iwl_priv *priv)
{
	unsigned long flags;

	iwl_apm_stop_master(priv);

	spin_lock_irqsave(&priv->lock, flags);

	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);
	/* clear "init complete" move adapter D0A* --> D0U state */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_apm_stop);
void iwl_configure_filter(struct ieee80211_hw *hw,
			  unsigned int changed_flags,
			  unsigned int *total_flags,
			  u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 *filter_flags = &priv->staging_rxon.filter_flags;

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			changed_flags, *total_flags);

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))
			*filter_flags |= RXON_FILTER_PROMISC_MSK;
		else
			*filter_flags &= ~RXON_FILTER_PROMISC_MSK;
	}
	if (changed_flags & FIF_ALLMULTI) {
		if (*total_flags & FIF_ALLMULTI)
			*filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
		else
			*filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
	}
	if (changed_flags & FIF_CONTROL) {
		if (*total_flags & FIF_CONTROL)
			*filter_flags |= RXON_FILTER_CTL2HOST_MSK;
		else
			*filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
	}
	if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
		if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
			*filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
		else
			*filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
	}

	/* We avoid iwl_commit_rxon here to commit the new filter flags
	 * since mac80211 will call ieee80211_hw_config immediately.
	 * (mc_list is not supported at this time). Otherwise, we need to
	 * queue a background iwl_commit_rxon work.
	 */

	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
EXPORT_SYMBOL(iwl_configure_filter);
int iwl_setup_mac(struct iwl_priv *priv)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	hw->rate_control_algorithm = "iwl-agn-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_NOISE_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_SPECTRUM_MGMT;

	if (!priv->cfg->broken_powersave)
		hw->flags |= IEEE80211_HW_SUPPORTS_PS |
			     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->custom_regulatory = true;

	/* Firmware does not support this */
	hw->wiphy->disable_beacon_hints = true;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->ps_default = false;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
EXPORT_SYMBOL(iwl_setup_mac);
int iwl_set_hw_params(struct iwl_priv *priv)
{
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (priv->cfg->mod_params->amsdu_size_8K)
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
	else
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);

	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;

	if (priv->cfg->mod_params->disable_11n)
		priv->cfg->sku &= ~IWL_SKU_N;

	/* Device-specific setup */
	return priv->cfg->ops->lib->set_hw_params(priv);
}
EXPORT_SYMBOL(iwl_set_hw_params);
int iwl_init_drv(struct iwl_priv *priv)
{
	int ret;

	priv->ibss_beacon = NULL;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	INIT_LIST_HEAD(&priv->free_frames);

	mutex_init(&priv->mutex);

	/* Clear the driver's (not device's) station table */
	iwl_clear_stations_table(priv);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	iwl_init_scan_params(priv);

	iwl_reset_qos(priv);

	priv->qos_data.qos_active = 0;
	priv->qos_data.qos_cap.val = 0;

	priv->rates_mask = IWL_RATES_MASK;
	/* Set the tx_power_user_lmt to the lowest power level
	 * this value will get overwritten by channel max power avg
	 * from eeprom */
	priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;

	ret = iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwlcore_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwlcore_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}
EXPORT_SYMBOL(iwl_init_drv);
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret = 0;
	s8 prev_tx_power = priv->tx_power_user_lmt;

	if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWL_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (priv->tx_power_user_lmt != tx_power)
		force = true;

	/* if nic is not up don't send command */
	if (iwl_is_ready_rf(priv)) {
		priv->tx_power_user_lmt = tx_power;
		if (force && priv->cfg->ops->lib->send_tx_power)
			ret = priv->cfg->ops->lib->send_tx_power(priv);
		else if (!priv->cfg->ops->lib->send_tx_power)
			ret = -EOPNOTSUPP;
		/*
		 * if fail to set tx_power, restore the orig. tx power
		 */
		if (ret)
			priv->tx_power_user_lmt = prev_tx_power;
	}

	/*
	 * Even this is an async host command, the command
	 * will always report success from uCode
	 * So once driver can placing the command into the queue
	 * successfully, driver can use priv->tx_power_user_lmt
	 * to reflect the current tx power
	 */
	return ret;
}
EXPORT_SYMBOL(iwl_set_tx_power);
void iwl_uninit_drv(struct iwl_priv *priv)
{
	iwl_calib_free_results(priv);
	iwlcore_free_geos(priv);
	iwl_free_channel_map(priv);
}
EXPORT_SYMBOL(iwl_uninit_drv);
#define ICT_COUNT     (PAGE_SIZE/sizeof(u32))

/* Free dram table */
void iwl_free_isr_ict(struct iwl_priv *priv)
{
	if (priv->ict_tbl_vir) {
		pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
					PAGE_SIZE, priv->ict_tbl_vir,
					priv->ict_tbl_dma);
		priv->ict_tbl_vir = NULL;
	}
}
EXPORT_SYMBOL(iwl_free_isr_ict);
/* allocate dram shared table it is a PAGE_SIZE aligned
 * also reset all data related to ICT table interrupt.
 */
int iwl_alloc_isr_ict(struct iwl_priv *priv)
{

	if (priv->cfg->use_isr_legacy)
		return 0;
	/* allocate shared data table */
	priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
						  ICT_COUNT) + PAGE_SIZE,
						  &priv->ict_tbl_dma);
	if (!priv->ict_tbl_vir)
		return -ENOMEM;

	/* align table to PAGE_SIZE boundary */
	priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);

	IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
			     (unsigned long long)priv->ict_tbl_dma,
			     (unsigned long long)priv->aligned_ict_tbl_dma,
			     (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));

	priv->ict_tbl = priv->ict_tbl_vir +
			(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);

	IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
			     priv->ict_tbl, priv->ict_tbl_vir,
			     (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));

	/* reset table and index to all 0 */
	memset(priv->ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
	priv->ict_index = 0;

	/* add periodic RX interrupt */
	priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
	return 0;
}
EXPORT_SYMBOL(iwl_alloc_isr_ict);
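/*
 * Sizing note (illustrative, not in the original source): with 4 KiB pages,
 * ICT_COUNT = PAGE_SIZE / sizeof(u32) = 1024 entries.  Allocating
 * ICT_COUNT * sizeof(u32) + PAGE_SIZE bytes guarantees that a fully
 * page-aligned window of ICT_COUNT entries fits inside the buffer, which is
 * what the ALIGN() of ict_tbl_dma above relies on.
 */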
/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
int iwl_reset_ict(struct iwl_priv *priv)
{
	u32 val;
	unsigned long flags;

	if (!priv->ict_tbl_vir)
		return 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);

	memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);

	val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE;
	val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;

	IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
			"aligned dma address %Lx\n",
			val, (unsigned long long)priv->aligned_ict_tbl_dma);

	iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
	priv->use_ict = true;
	priv->ict_index = 0;
	iwl_write32(priv, CSR_INT, priv->inta_mask);
	iwl_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}
EXPORT_SYMBOL(iwl_reset_ict);
/* Device is going down disable ict interrupt usage */
void iwl_disable_ict(struct iwl_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	priv->use_ict = false;
	spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL(iwl_disable_ict);
/* interrupt handler using ict table, with this interrupt driver will
 * stop using INTA register to get device's interrupt, reading this register
 * is expensive, device will write interrupts in ICT dram table, increment
 * index then will fire interrupt to driver, driver will OR all ICT table
 * entries from current index up to table entry with 0 value. the result is
 * the interrupt we need to service, driver will set the entries back to 0 and
 * set index.
 */
irqreturn_t iwl_isr_ict(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 val = 0;

	if (!priv)
		return IRQ_NONE;

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (!priv->use_ict)
		return iwl_isr(irq, data);

	spin_lock(&priv->lock);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!priv->ict_tbl[priv->ict_index]) {
		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	/* read all entries that not 0 start with ict_index */
	while (priv->ict_tbl[priv->ict_index]) {

		val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
		IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
				priv->ict_index,
				le32_to_cpu(priv->ict_tbl[priv->ict_index]));
		priv->ict_tbl[priv->ict_index] = 0;
		priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
						     ICT_COUNT);

	}

	/* We should not get this value, just ignore it. */
	if (val == 0xffffffff)
		val = 0;

	inta = (0xff & val) | ((0xff00 & val) << 16);
	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
			inta, inta_mask, val);

	inta &= priv->inta_mask;
	priv->inta |= inta;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&priv->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
		/* Allow interrupt if was disabled by this handler and
		 * no tasklet was scheduled. We should not enable interrupt,
		 * tasklet will enable it.
		 */
		iwl_enable_interrupts(priv);
	}

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service.
	 * only Re-enable if disabled by irq.
	 */
	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
		iwl_enable_interrupts(priv);

	spin_unlock(&priv->lock);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_ict);
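/*
 * (Note, not in the original source: each ICT table entry is a compressed
 * form of CSR_INT -- its low byte carries CSR_INT bits 0-7 and its second
 * byte carries bits 24-31, which is why the handler above rebuilds the
 * register value with (0xff & val) | ((0xff00 & val) << 16).)
 */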
static irqreturn_t iwl_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_fh;
#endif
	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->lock);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta) {
		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
		IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
			      "fh 0x%08x\n", inta, inta_mask, inta_fh);
	}
#endif

	priv->inta |= inta;
	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta))
		tasklet_schedule(&priv->irq_tasklet);
	else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
		iwl_enable_interrupts(priv);

 unplugged:
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq and no scheduled tasklet. */
	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
		iwl_enable_interrupts(priv);

	spin_unlock(&priv->lock);
	return IRQ_NONE;
}
irqreturn_t iwl_isr_legacy(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->lock);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

 unplugged:
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;

 none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_enable_interrupts(priv);
	spin_unlock(&priv->lock);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.flags = 3,
		.lead_time = 0xAA,
		.max_kill = 1,
		.kill_ack_mask = 0,
		.kill_cts_mask = 0,
	};

	return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
				sizeof(struct iwl_bt_cmd), &bt_cmd);
}
EXPORT_SYMBOL(iwl_send_bt_config);
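
/*
 * iwl_send_statistics_request() below asks the uCode for a statistics
 * notification by sending REPLY_STATISTICS_CMD with a 32-bit flags payload.
 * A minimal usage sketch (the CMD_ASYNC flag and the call site are
 * assumptions, not taken from this file):
 *
 *	ret = iwl_send_statistics_request(priv, CMD_ASYNC);
 *	if (ret)
 *		IWL_ERR(priv, "could not request statistics\n");
 */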
int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
{
	u32 stat_flags = 0;
	struct iwl_host_cmd cmd = {
		.id = REPLY_STATISTICS_CMD,
		.flags = flags,
		.len = sizeof(stat_flags),
		.data = (u8 *) &stat_flags,
	};
	return iwl_send_cmd(priv, &cmd);
}
EXPORT_SYMBOL(iwl_send_statistics_request);
/**
 * iwlcore_verify_inst_sparse - verify runtime uCode image in card vs. host,
 *   using sample data 100 bytes apart.  If these sample points are good,
 *   it's a pretty good bet that everything between them is good, too.
 */
static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
{
	u32 val;
	int ret = 0;
	u32 errcnt = 0;
	u32 i;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			i + IWL49_RTC_INST_LOWER_BOUND);
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			ret = -EIO;
			errcnt++;
			if (errcnt >= 3)
				break;
		}
	}

	return ret;
}
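
/*
 * Sampling every 100 bytes keeps the verification above cheap. For an
 * (illustrative) 40 KB instruction image, the sparse check needs only
 * about 40960 / 100 ~= 410 indirect reads, while the word-by-word check
 * below needs 40960 / 4 = 10240 reads.
 */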
/**
 * iwl_verify_inst_full - verify runtime uCode image in card vs. host,
 *     looking at all data.
 */
static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
				 u32 len)
{
	u32 val;
	u32 save_len = len;
	int ret = 0;
	u32 errcnt = 0;

	IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);

	iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
			   IWL49_RTC_INST_LOWER_BOUND);

	for (; len > 0; len -= sizeof(u32), image++) {
		/* read data comes through single port, auto-incr addr */
		/* NOTE: Use the debugless read so we don't flood kernel log
		 * if IWL_DL_IO is set */
		val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (val != le32_to_cpu(*image)) {
			IWL_ERR(priv, "uCode INST section is invalid at "
				"offset 0x%x, is 0x%x, s/b 0x%x\n",
				save_len - len, val, le32_to_cpu(*image));
			ret = -EIO;
			errcnt++;
			if (errcnt >= 20)
				break;
		}
	}

	if (!errcnt)
		IWL_DEBUG_INFO(priv,
			"ucode image in INSTRUCTION memory is good\n");

	return ret;
}
/**
 * iwl_verify_ucode - determine which instruction image is in SRAM,
 *    and verify its contents
 */
int iwl_verify_ucode(struct iwl_priv *priv)
{
	__le32 *image;
	u32 len;
	int ret;

	/* Try bootstrap */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try initialize */
	image = (__le32 *)priv->ucode_init.v_addr;
	len = priv->ucode_init.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
		return 0;
	}

	/* Try runtime/protocol */
	image = (__le32 *)priv->ucode_code.v_addr;
	len = priv->ucode_code.len;
	ret = iwlcore_verify_inst_sparse(priv, image, len);
	if (!ret) {
		IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
		return 0;
	}

	IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");

	/* Since nothing seems to match, show first several data entries in
	 * instruction SRAM, so maybe visual inspection will give a clue.
	 * Selection of bootstrap image (vs. other images) is arbitrary. */
	image = (__le32 *)priv->ucode_boot.v_addr;
	len = priv->ucode_boot.len;
	ret = iwl_verify_inst_full(priv, image, len);

	return ret;
}
EXPORT_SYMBOL(iwl_verify_ucode);
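
/*
 * iwl_rf_kill_ct_config() below clears the CT-kill exit bit in
 * CSR_UCODE_DRV_GP1 and then sends REPLY_CT_KILL_CONFIG_CMD. On 1000,
 * 6x00 and 6x50 hardware it uses the "advanced" form with separate enter
 * and exit temperature thresholds (iwl_ct_kill_throttling_config); on
 * other revisions it falls back to the single-threshold form
 * (iwl_ct_kill_config). The thresholds come from priv->hw_params.
 */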
void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl_ct_kill_config cmd;
	struct iwl_ct_kill_throttling_config adv_cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);
	priv->thermal_throttle.ct_kill_toggle = false;

	switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
	case CSR_HW_REV_TYPE_1000:
	case CSR_HW_REV_TYPE_6x00:
	case CSR_HW_REV_TYPE_6x50:
		adv_cmd.critical_temperature_enter =
			cpu_to_le32(priv->hw_params.ct_kill_threshold);
		adv_cmd.critical_temperature_exit =
			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);

		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
				       sizeof(adv_cmd), &adv_cmd);
		if (ret)
			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
		else
			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				       "succeeded, "
				       "critical temperature enter is %d, "
				       "exit is %d\n",
				       priv->hw_params.ct_kill_threshold,
				       priv->hw_params.ct_kill_exit_threshold);
		break;
	default:
		cmd.critical_temperature_R =
			cpu_to_le32(priv->hw_params.ct_kill_threshold);

		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
		else
			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				       "succeeded, "
				       "critical temperature is %d\n",
				       priv->hw_params.ct_kill_threshold);
		break;
	}
}
EXPORT_SYMBOL(iwl_rf_kill_ct_config);
/*
 * Use: Sets the device's internal card state to enable, disable, or halt
 *
 * When in the 'enable' state the card operates as normal.
 * When in the 'disable' state, the card enters into a low power mode.
 * When in the 'halt' state, the card is shut down and must be fully
 * restarted to come back on.
 */
int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_CARD_STATE_CMD,
		.len = sizeof(u32),
		.data = &flags,
		.flags = meta_flag,
	};

	return iwl_send_cmd(priv, &cmd);
}
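
/*
 * The iwl_rx_*() functions below are notification handlers for packets
 * coming back from the uCode; each one is handed an iwl_rx_mem_buffer and
 * pulls the payload out with rxb_addr(). They are presumably wired into
 * the driver's RX dispatch table by the per-device code (an assumption;
 * the registration is not visible here).
 */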
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
void iwl_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_rx_reply_error);
void iwl_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
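
/*
 * iwl_mac_conf_tx() below maps mac80211's EDCA parameters into the uCode
 * QoS table. Two details worth noting: the queue index is reversed
 * (q = AC_NUM - 1 - queue), presumably because the firmware orders the
 * access categories opposite to mac80211 (so mac80211 queue 0 lands in
 * uCode AC 3), and params->txop is multiplied by 32, which converts the
 * TXOP from the 32-usec units mac80211 uses into microseconds (the unit
 * conversion is an assumption based on the 802.11 TXOP encoding).
 */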
int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
		    const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min);
	priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
	priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
	priv->qos_data.def_qos_parm.ac[q].edca_txop =
			cpu_to_le16((params->txop * 32));

	priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	priv->qos_data.qos_active = 1;

	if (priv->iw_mode == NL80211_IFTYPE_AP)
		iwl_activate_qos(priv, 1);
	else if (priv->assoc_id && iwl_is_associated(priv))
		iwl_activate_qos(priv, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_mac_conf_tx);
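
/*
 * iwl_ht_conf() below decides whether a single RX chain is sufficient for
 * the current connection: in station mode it looks at the peer's HT
 * capabilities (the MCS tx_params max-streams field plus rx_mask[1] and
 * rx_mask[2]); if the peer cannot use more than one spatial stream,
 * single_chain_sufficient is set so the extra receivers need not be kept
 * powered. In IBSS mode a single chain is always considered sufficient.
 */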
static void iwl_ht_conf(struct iwl_priv *priv,
			struct ieee80211_bss_conf *bss_conf)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!ht_conf->is_ht)
		return;

	ht_conf->ht_protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ht_conf->non_GF_STA_present =
		!!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (priv->iw_mode) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(priv->hw, priv->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
				      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
					>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
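
/*
 * IWL_DELAY_NEXT_SCAN_AFTER_ASSOC below is six seconds (HZ * 6 jiffies):
 * after a new association the driver pushes next_scan_jiffies out by this
 * much so a scan does not interrupt the key exchange that typically
 * follows association (the code itself only says "leave time for EAPOL
 * exchange to complete").
 */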
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

void iwl_bss_info_changed(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  struct ieee80211_bss_conf *bss_conf,
			  u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	if (!iwl_is_alive(priv))
		return;

	mutex_lock(&priv->mutex);

	if (changes & BSS_CHANGED_BEACON &&
	    priv->iw_mode == NL80211_IFTYPE_AP) {
		dev_kfree_skb(priv->ibss_beacon);
		priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
	}

	if (changes & BSS_CHANGED_BEACON_INT) {
		priv->beacon_int = bss_conf->beacon_int;
		/* TODO: in AP mode, do something to make this take effect */
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
		    bss_conf->assoc) {
			memcpy(priv->staging_rxon.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			priv->staging_rxon.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
	    changes & BSS_CHANGED_BEACON) {
		struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);

		if (beacon)
			iwl_mac_beacon_update(hw, beacon);
	}

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
			priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			priv->staging_rxon.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			priv->staging_rxon.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			priv->staging_rxon.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_ht_conf(priv, bss_conf);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->assoc_id = bss_conf->aid;
			priv->beacon_int = bss_conf->beacon_int;
			priv->timestamp = bss_conf->timestamp;
			priv->assoc_capability = bss_conf->assoc_capability;

			iwl_led_associate(priv);

			/*
			 * We have just associated, don't start scan too early
			 * leave time for EAPOL exchange to complete.
			 *
			 * XXX: do this in mac80211
			 */
			priv->next_scan_jiffies = jiffies +
					IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
			if (!iwl_is_rfkill(priv))
				priv->cfg->ops->lib->post_associate(priv);
		} else {
			priv->assoc_id = 0;
			iwl_led_disassociate(priv);
		}
	}

	if (changes && iwl_is_associated(priv) && priv->assoc_id) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_send_rxon_assoc(priv);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&priv->active_rxon,
			       &priv->staging_rxon,
			       sizeof(struct iwl_rxon_cmd));
		}
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_bss_info_changed);
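
/*
 * iwl_mac_beacon_update - accept a new IBSS beacon from mac80211
 *
 * Stores the beacon skb (freeing any previous one), copies the beacon's
 * TSF timestamp into priv->timestamp, resets the QoS parameters and runs
 * the device-specific post_associate() hook. Only valid in IBSS mode; in
 * any other mode, or with the RF not ready, it bails out early.
 */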
int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
		return -EIO;
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_reset_qos(priv);

	priv->cfg->ops->lib->post_associate(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_mac_beacon_update);
int iwl_set_mode(struct iwl_priv *priv, int mode)
{
	if (mode == NL80211_IFTYPE_ADHOC) {
		const struct iwl_channel_info *ch_info;

		ch_info = iwl_get_channel_info(priv,
			priv->band,
			le16_to_cpu(priv->staging_rxon.channel));

		if (!ch_info || !is_channel_ibss(ch_info)) {
			IWL_ERR(priv, "channel %d not IBSS channel\n",
				le16_to_cpu(priv->staging_rxon.channel));
			return -EINVAL;
		}
	}

	iwl_connection_init_rx_config(priv, mode);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);

	iwl_clear_stations_table(priv);

	/* don't commit rxon if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return -EAGAIN;

	iwlcore_commit_rxon(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_set_mode);
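
/*
 * Note the -EAGAIN contract between iwl_set_mode() above and
 * iwl_mac_add_interface() below: when the RF is not ready, iwl_set_mode()
 * skips the RXON commit and returns -EAGAIN, and the caller records the
 * pending mode change by setting STATUS_MODE_PENDING so it can be applied
 * once the device is ready.
 */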
int iwl_mac_add_interface(struct ieee80211_hw *hw,
			  struct ieee80211_if_init_conf *conf)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);

	if (priv->vif) {
		IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&priv->lock, flags);
	priv->vif = conf->vif;
	priv->iw_mode = conf->type;

	spin_unlock_irqrestore(&priv->lock, flags);

	mutex_lock(&priv->mutex);

	if (conf->mac_addr) {
		IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
		memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
	}

	if (iwl_set_mode(priv, conf->type) == -EAGAIN)
		/* we are not ready, will run again when ready */
		set_bit(STATUS_MODE_PENDING, &priv->status);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_if_init_conf *conf)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_is_ready_rf(priv)) {
		iwl_scan_cancel_timeout(priv, 100);
		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwlcore_commit_rxon(priv);
	}
	if (priv->vif == conf->vif) {
		priv->vif = NULL;
		memset(priv->bssid, 0, ETH_ALEN);
	}
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_mac_remove_interface);
/**
 * iwl_mac_config - mac80211 config callback
 *
 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
 * be set inappropriately and the driver currently sets the hardware up to
 * use it whenever needed.
 */
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	unsigned long flags = 0;
	u16 ch;
	int ret = 0;
	int scan_active = 0;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
			   conf->channel->hw_value, changed);

	if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
		     test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		if (scan_active)
			goto set_ch_out;

		ch = ieee80211_frequency_to_channel(conf->channel->center_freq);
		ch_info = iwl_get_channel_info(priv, conf->channel->band, ch);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !is_channel_ibss(ch_info)) {
			IWL_ERR(priv, "channel %d in band %d not "
				"IBSS channel\n",
				conf->channel->hw_value, conf->channel->band);
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&priv->lock, flags);

		/* Configure HT40 channels */
		ht_conf->is_ht = conf_is_ht(conf);
		if (ht_conf->is_ht) {
			if (conf_is_ht40_minus(conf)) {
				ht_conf->extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
				ht_conf->is_40mhz = true;
			} else if (conf_is_ht40_plus(conf)) {
				ht_conf->extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
				ht_conf->is_40mhz = true;
			} else {
				ht_conf->extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
				ht_conf->is_40mhz = false;
			}
		} else
			ht_conf->is_40mhz = false;
		/* Default to no protection. Protection mode will later be set
		 * from BSS config in iwl_ht_conf */
		ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;

		/* if we are switching from ht to 2.4 clear flags
		 * from any ht related info since 2.4 does not
		 * support ht */
		if ((le16_to_cpu(priv->staging_rxon.channel) != ch))
			priv->staging_rxon.flags = 0;

		iwl_set_rxon_channel(priv, conf->channel);

		iwl_set_flags_for_band(priv, conf->channel->band);
		spin_unlock_irqrestore(&priv->lock, flags);
 set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
		       IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
				   priv->tx_power_user_lmt, conf->power_level);

		iwl_set_tx_power(priv, conf->power_level, false);
	}

	/* call to ensure that 4965 rx_chain is set properly in monitor mode */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);

	if (!iwl_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	if (memcmp(&priv->active_rxon,
		   &priv->staging_rxon, sizeof(priv->staging_rxon)))
		iwlcore_commit_rxon(priv);
	else
		IWL_DEBUG_INFO(priv, "Not re-sending same RXON configuration.\n");

 out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_mac_config);
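
/*
 * iwl_mac_get_tx_stats() below reports per-AC queue occupancy to mac80211
 * straight from the driver's queue bookkeeping: for each of the AC_NUM
 * queues, "len" is the number of used slots (n_window minus the space
 * reported by iwl_queue_space()), "limit" is n_window minus the high-water
 * mark, and "count" is the window size itself.
 */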
int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
			 struct ieee80211_tx_queue_stats *stats)
{
	struct iwl_priv *priv = hw->priv;
	int i, avail;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	unsigned long flags;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	spin_lock_irqsave(&priv->lock, flags);

	for (i = 0; i < AC_NUM; i++) {
		txq = &priv->txq[i];
		q = &txq->q;
		avail = iwl_queue_space(q);

		stats[i].len = q->n_window - avail;
		stats[i].limit = q->n_window - q->high_mark;
		stats[i].count = q->n_window;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");

	return 0;
}
EXPORT_SYMBOL(iwl_mac_get_tx_stats);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_reset_qos(priv);

	spin_lock_irqsave(&priv->lock, flags);
	priv->assoc_id = 0;
	priv->assoc_capability = 0;
	priv->assoc_station_added = 0;

	/* new association get rid of ibss beacon skb */
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = NULL;

	priv->beacon_int = priv->vif->bss_conf.beacon_int;
	priv->timestamp = 0;
	if (priv->iw_mode == NL80211_IFTYPE_STATION)
		priv->beacon_int = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	if (priv->iw_mode != NL80211_IFTYPE_AP) {
		iwl_scan_cancel_timeout(priv, 100);
		priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
		iwlcore_commit_rxon(priv);
	}

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	iwl_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_mac_reset_tsf);
int iwl_alloc_txq_mem(struct iwl_priv *priv)
{
	if (!priv->txq)
		priv->txq = kzalloc(
			sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
			GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_alloc_txq_mem);

void iwl_free_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_free_txq_mem);
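
/*
 * The CONFIG_IWLWIFI_DEBUGFS block below implements a small capture log
 * for debugging: tx_traffic and rx_traffic are ring buffers of
 * IWL_TRAFFIC_ENTRIES entries, each IWL_TRAFFIC_ENTRY_SIZE bytes, that the
 * iwl_dbg_log_*_data_frame() helpers copy frame headers into when the
 * IWL_DL_TX / IWL_DL_RX debug levels are enabled. The buffers are
 * presumably dumped through debugfs.
 */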
#ifdef CONFIG_IWLWIFI_DEBUGFS

#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)

void iwl_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}

int iwl_alloc_traffic_mem(struct iwl_priv *priv)
{
	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;

	if (iwl_debug_level & IWL_DL_TX) {
		if (!priv->tx_traffic) {
			priv->tx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->tx_traffic)
				return -ENOMEM;
		}
	}
	if (iwl_debug_level & IWL_DL_RX) {
		if (!priv->rx_traffic) {
			priv->rx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->rx_traffic)
				return -ENOMEM;
		}
	}
	iwl_reset_traffic_log(priv);
	return 0;
}
EXPORT_SYMBOL(iwl_alloc_traffic_mem);

void iwl_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_free_traffic_mem);
void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
			       u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwl_debug_level & IWL_DL_TX)))
		return;

	if (!priv->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->tx_traffic +
		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->tx_traffic_idx =
			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame);

void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
			       u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwl_debug_level & IWL_DL_RX)))
		return;

	if (!priv->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->rx_traffic +
		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->rx_traffic_idx =
			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame);
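
/*
 * get_mgmt_string()/get_ctrl_string() below rely on the IWL_CMD() macro,
 * which presumably expands to "case x: return #x;" (an assumption; the
 * macro is defined in a header, not here), turning each switch into a
 * frame-subtype-to-name lookup used by the statistics/debugfs code.
 */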
const char *get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";
	}
}

const char *get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";
	}
}
void iwl_clear_tx_stats(struct iwl_priv *priv)
{
	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
}

void iwl_clear_rx_stats(struct iwl_priv *priv)
{
	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
}

/*
 * If CONFIG_IWLWIFI_DEBUGFS is defined, iwl_update_stats() records all
 * MGMT, CTRL and DATA packets on both the TX and RX paths; use debugfs to
 * display the tx/rx statistics.
 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT or CTRL information is
 * recorded, but DATA packets are still counted, because iwl_led.c needs to
 * control the LED blinking based on the number of tx and rx data frames.
 */
void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data frame */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_update_stats);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#ifdef CONFIG_PM

int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Call apm_ops.stop() here to make sure the DMA is stopped.
	 */
	priv->cfg->ops->lib->apm_ops.stop(priv);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
EXPORT_SYMBOL(iwl_pci_suspend);

int iwl_pci_resume(struct pci_dev *pdev)
{
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	int ret;

	pci_set_power_state(pdev, PCI_D0);
	ret = pci_enable_device(pdev);
	if (ret)
		return ret;
	pci_restore_state(pdev);
	iwl_enable_interrupts(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_pci_resume);

#endif /* CONFIG_PM */