/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
/*
 * If bt_coex_active is true, uCode will do kill/defer every time the
 * priority line is asserted (BT is sending signals on the priority line
 * in the PCIx).
 * If bt_coex_active is false, uCode will ignore the BT activity and
 * perform normal operation.
 *
 * Users might experience transmit issues on some platforms due to this
 * WiFi/BT co-existence problem. The possible symptoms are:
 *   being able to scan and find all available APs, but
 *   not being able to associate with any AP.
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
u32 iwlegacy_debug_level;
EXPORT_SYMBOL(iwlegacy_debug_level);

const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwlegacy_bcast_addr);
/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
{
	struct iwl_priv *priv;
	/* mac80211 allocates memory for this device instance, including
	 *   space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
				cfg->ops->ieee80211_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_legacy_alloc_all);
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
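/*
 * These are the single-spatial-stream short-GI PHY rates (72.2 Mbps for
 * 20 MHz, 150 Mbps for 40 MHz, truncated to whole Mbps);
 * iwl_legacy_init_ht_hw_capab() below multiplies them by the number of
 * receive chains to advertise the highest supported Rx rate.
 */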
static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
/**
 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int iwl_legacy_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!iwl_legacy_is_channel_valid(ch))
			continue;

		sband = &priv->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (iwl_legacy_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	priv->tx_power_device_lmt = max_tx_power;
	priv->tx_power_user_lmt = max_tx_power;
	priv->tx_power_next = max_tx_power;

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			   priv->pci_dev->device,
			   priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_geos);
/**
 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
 */
void iwl_legacy_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwl_legacy_free_geos);
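/*
 * Check the EEPROM channel flags to decide whether an HT40 extension
 * (secondary) channel is allowed above or below the given control channel.
 */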
static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
					 enum ieee80211_band band,
					 u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_legacy_get_channel_info(priv, band, channel);
	if (!iwl_legacy_is_channel_valid(ch_info))
		return false;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return false;
}
bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
	 * the bit will not be set in the pure 40MHz case.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_legacy_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
static u16
iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device.
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in IBSS case, we'll beacon too much)
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */

	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}
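/*
 * Beacon and listen intervals below are expressed in 802.11 time units
 * (1 TU = 1024 usec); TIME_UNIT converts them to microseconds.
 */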
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 *	 for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
void
iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			int hw_decrypt)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	if (hw_decrypt)
		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
	else
		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
/* validate RXON structure is valid */
int
iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
	bool error = false;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			error = true;
		}
	} else {
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			error = true;
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			error = true;
		}
	}
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		error = true;
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		error = true;
	}

	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect");
		error = true;
	}

	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		error = true;
	}

	if (error)
		IWL_WARN(priv, "Tuning to channel %d\n",
			    le16_to_cpu(rxon->channel));

	if (error) {
		IWL_ERR(priv, "Invalid RXON\n");
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
/**
 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
	const struct iwl_legacy_rxon_cmd *active = &ctx->active;

#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       c1, c2);				\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_legacy_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
				active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	/*
	 * Assign the lowest rate -- should really get this from
	 * the beacon skb from mac80211.
	 */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
			struct iwl_ht_config *ht_conf,
			struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
					RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
		if (ctx->ht.protection ==
				IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}

void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
{
	struct iwl_rxon_context *ctx;

	for_each_context(priv, ctx)
		_iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
/* Return valid, unused, channel for a passive scan to reset the RF */
u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
				 enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_legacy_get_channel_info(priv, band, channel);
		if (iwl_legacy_is_channel_valid(ch_info))
			break;
	}

	return channel;
}
EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
/**
 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
void
iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			 struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
}
EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    enum ieee80211_band band,
			    struct ieee80211_vif *vif)
{
	if (band == IEEE80211_BAND_5GHZ) {
		ctx->staging.flags &=
		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
		      | RXON_FLG_CCK_MSK);
		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
	} else {
		/* Copied from iwl_post_associate() */
		if (vif && vif->bss_conf.use_short_slot)
			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;

		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
	}
}
EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else
	switch (ctx->vif->type) {
	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

	/* TODO:  Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;

	ch_info = iwl_legacy_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);

	memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
}
EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
void iwl_legacy_set_rate(struct iwl_priv *priv)
{
	const struct ieee80211_supported_band *hw = NULL;
	struct ieee80211_rate *rate;
	struct iwl_rxon_context *ctx;
	int i;

	hw = iwl_get_hw_mode(priv, priv->band);
	if (!hw) {
		IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
		return;
	}

	priv->active_rate = 0;

	for (i = 0; i < hw->n_bitrates; i++) {
		rate = &(hw->bitrates[i]);
		if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
			priv->active_rate |= (1 << rate->hw_value);
	}

	IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);

	for_each_context(priv, ctx) {
		ctx->staging.cck_basic_rates =
		    (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

		ctx->staging.ofdm_basic_rates =
		   (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	}
}
EXPORT_SYMBOL(iwl_legacy_set_rate);
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_csa_notification *csa = &(pkt->u.csa_notif);

	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;

	if (priv->switch_rxon.switch_in_progress) {
		if (!le32_to_cpu(csa->status) &&
		    (csa->channel == priv->switch_rxon.channel)) {
			rxon->channel = csa->channel;
			ctx->staging.channel = csa->channel;
			IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, true);
		} else {
			IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
			      le16_to_cpu(csa->channel));
			iwl_legacy_chswitch_done(priv, false);
		}
	}
}
EXPORT_SYMBOL(iwl_legacy_rx_csa);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
				le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
				le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
				rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
				le16_to_cpu(rxon->assoc_id));
}
EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
#endif
/**
 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
 */
void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
{
	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->status);

	IWL_ERR(priv, "Loaded firmware version: %s\n",
		priv->hw->wiphy->fw_version);

	priv->cfg->ops->lib->dump_nic_error_log(priv);
	if (priv->cfg->ops->lib->dump_fh)
		priv->cfg->ops->lib->dump_fh(priv, NULL, false);
	priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
		iwl_legacy_print_rx_config_cmd(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);
#endif

	wake_up_interruptible(&priv->wait_command_queue);

	/* Keep the restart process from trying to send host
	 * commands by clearing the INIT status bit */
	clear_bit(STATUS_READY, &priv->status);

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
			  "Restarting adapter due to uCode error.\n");

		if (priv->cfg->mod_params->restart_fw)
			queue_work(priv->workqueue, &priv->restart);
	}
}
EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}
void iwl_legacy_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_legacy_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
				CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_legacy_apm_stop);
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
int iwl_legacy_apm_init(struct iwl_priv *priv)
{
	int ret = 0;
	u16 lctl;

	IWL_DEBUG_INFO(priv, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
			  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
					CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 * NOTE:  This is no-op for 3945 (non-existent bit)
	 */
	iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	/*
	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	if (priv->cfg->base_params->set_l0s) {
		lctl = iwl_legacy_pcie_link_ctl(priv);
		if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
					PCI_CFG_LINK_CTRL_VAL_L1_EN) {
			/* L1-ASPM enabled; disable(!) L0S  */
			iwl_legacy_set_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
		} else {
			/* L1-ASPM disabled; enable(!) L0S */
			iwl_legacy_clear_bit(priv, CSR_GIO_REG,
					CSR_GIO_REG_VAL_L0S_ENABLED);
			IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
		}
	}

	/* Configure analog phase-lock-loop before activating to D0A */
	if (priv->cfg->base_params->pll_cfg_val)
		iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
			    priv->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(priv, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
	 * BSM (Boostrap State Machine) is only in 3945 and 4965.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (priv->cfg->base_params->use_bsm)
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
	else
		iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
			APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

out:
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_apm_init);
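/*
 * iwl_legacy_set_tx_power() below takes the requested power in dBm; values
 * below 0 dBm (1 mW) or above the device limit read from EEPROM are rejected.
 */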
int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (!priv->cfg->ops->lib->send_tx_power)
		return -EOPNOTSUPP;

	/* 0 dBm mean 1 milliwatt */
	if (tx_power < 0) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below 1 mW.\n",
			 tx_power);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_legacy_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always needs to be updated for the newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = priv->cfg->ops->lib->send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_set_tx_power);
void iwl_legacy_send_bt_config(struct iwl_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	IWL_DEBUG_INFO(priv, "BT coex %s\n",
		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			     sizeof(struct iwl_bt_cmd), &bt_cmd))
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_legacy_send_bt_config);
int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
{
	struct iwl_statistics_cmd statistics_cmd = {
		.configuration_flags =
			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
	};

	if (flags & CMD_ASYNC)
		return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
					sizeof(struct iwl_statistics_cmd),
					&statistics_cmd, NULL);
	else
		return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
					sizeof(struct iwl_statistics_cmd),
					&statistics_cmd);
}
EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			iwl_legacy_get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
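/*
 * mac80211 hands iwl_legacy_mac_conf_tx() queue numbers with 0 as the
 * highest-priority AC (VO); the uCode EDCA parameter table is indexed the
 * other way around, hence the "q = AC_NUM - 1 - queue" inversion below.
 */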
int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
		    const struct ieee80211_tx_queue_params *params)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx;
	unsigned long flags;
	int q;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return -EIO;
	}

	if (queue >= AC_NUM) {
		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
		return 0;
	}

	q = AC_NUM - 1 - queue;

	spin_lock_irqsave(&priv->lock, flags);

	for_each_context(priv, ctx) {
		ctx->qos_data.def_qos_parm.ac[q].cw_min =
			cpu_to_le16(params->cw_min);
		ctx->qos_data.def_qos_parm.ac[q].cw_max =
			cpu_to_le16(params->cw_max);
		ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
		ctx->qos_data.def_qos_parm.ac[q].edca_txop =
				cpu_to_le16((params->txop * 32));

		ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	return priv->ibss_manager == IWL_IBSS_MANAGER;
}
EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
static int
iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	iwl_legacy_connection_init_rx_config(priv, ctx);

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);

	return iwl_legacy_commit_rxon(priv, ctx);
}
static int iwl_legacy_setup_interface(struct iwl_priv *priv,
			       struct iwl_rxon_context *ctx)
{
	struct ieee80211_vif *vif = ctx->vif;
	int err;

	lockdep_assert_held(&priv->mutex);

	/*
	 * This variable will be correct only when there's just
	 * a single context, but all code using it is for hardware
	 * that supports only one context.
	 */
	priv->iw_mode = vif->type;

	ctx->is_active = true;

	err = iwl_legacy_set_mode(priv, ctx);
	if (err) {
		if (!ctx->always_active)
			ctx->is_active = false;
		return err;
	}

	return 0;
}
int
iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *tmp, *ctx = NULL;
	int err;

	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
			   vif->type, vif->addr);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_WARN(priv, "Try to add interface when device not ready\n");
		err = -EINVAL;
		goto out;
	}

	for_each_context(priv, tmp) {
		u32 possible_modes =
			tmp->interface_modes | tmp->exclusive_interface_modes;

		if (tmp->vif) {
			/* check if this busy context is exclusive */
			if (tmp->exclusive_interface_modes &
						BIT(tmp->vif->type)) {
				err = -EINVAL;
				goto out;
			}
			continue;
		}

		if (!(possible_modes & BIT(vif->type)))
			continue;

		/* have maybe usable context w/o interface */
		ctx = tmp;
		break;
	}

	if (!ctx) {
		err = -EOPNOTSUPP;
		goto out;
	}

	vif_priv->ctx = ctx;
	ctx->vif = vif;

	err = iwl_legacy_setup_interface(priv, ctx);
	if (!err)
		goto out;

	ctx->vif = NULL;
	priv->iw_mode = NL80211_IFTYPE_STATION;

out:
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
				   struct ieee80211_vif *vif,
				   bool mode_change)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	lockdep_assert_held(&priv->mutex);

	if (priv->scan_vif == vif) {
		iwl_legacy_scan_cancel_timeout(priv, 200);
		iwl_legacy_force_scan_end(priv);
	}

	if (!mode_change) {
		iwl_legacy_set_mode(priv, ctx);
		if (!ctx->always_active)
			ctx->is_active = false;
	}
}
void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	WARN_ON(ctx->vif != vif);
	ctx->vif = NULL;

	iwl_legacy_teardown_interface(priv, vif, false);

	memset(priv->bssid, 0, ETH_ALEN);
	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
{
	if (!priv->txq)
		priv->txq = kzalloc(
			sizeof(struct iwl_tx_queue) *
				priv->cfg->base_params->num_of_queues,
			GFP_KERNEL);
	if (!priv->txq) {
		IWL_ERR(priv, "Not enough memory for txq\n");
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
void iwl_legacy_txq_mem(struct iwl_priv *priv)
{
	kfree(priv->txq);
	priv->txq = NULL;
}
EXPORT_SYMBOL(iwl_legacy_txq_mem);
#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS

#define IWL_TRAFFIC_DUMP_SIZE	(IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
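/*
 * The tx/rx traffic logs are fixed-size ring buffers of IWL_TRAFFIC_ENTRIES
 * entries of IWL_TRAFFIC_ENTRY_SIZE bytes each; the helpers below allocate,
 * reset and fill them with the first bytes of data frames for debugfs dumps.
 */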
void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
{
	priv->tx_traffic_idx = 0;
	priv->rx_traffic_idx = 0;
	if (priv->tx_traffic)
		memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
	if (priv->rx_traffic)
		memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
}
int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
{
	u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;

	if (iwlegacy_debug_level & IWL_DL_TX) {
		if (!priv->tx_traffic) {
			priv->tx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->tx_traffic)
				return -ENOMEM;
		}
	}
	if (iwlegacy_debug_level & IWL_DL_RX) {
		if (!priv->rx_traffic) {
			priv->rx_traffic =
				kzalloc(traffic_size, GFP_KERNEL);
			if (!priv->rx_traffic)
				return -ENOMEM;
		}
	}
	iwl_legacy_reset_traffic_log(priv);
	return 0;
}
EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
{
	kfree(priv->tx_traffic);
	priv->tx_traffic = NULL;

	kfree(priv->rx_traffic);
	priv->rx_traffic = NULL;
}
EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
		return;

	if (!priv->tx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->tx_traffic +
		       (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->tx_traffic_idx =
			(priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
		      u16 length, struct ieee80211_hdr *header)
{
	__le16 fc;
	u16 len;

	if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
		return;

	if (!priv->rx_traffic)
		return;

	fc = header->frame_control;
	if (ieee80211_is_data(fc)) {
		len = (length > IWL_TRAFFIC_ENTRY_SIZE)
		       ? IWL_TRAFFIC_ENTRY_SIZE : length;
		memcpy((priv->rx_traffic +
		       (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
		       header, len);
		priv->rx_traffic_idx =
			(priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
	}
}
EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
const char *iwl_legacy_get_mgmt_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(MANAGEMENT_ASSOC_REQ);
		IWL_CMD(MANAGEMENT_ASSOC_RESP);
		IWL_CMD(MANAGEMENT_REASSOC_REQ);
		IWL_CMD(MANAGEMENT_REASSOC_RESP);
		IWL_CMD(MANAGEMENT_PROBE_REQ);
		IWL_CMD(MANAGEMENT_PROBE_RESP);
		IWL_CMD(MANAGEMENT_BEACON);
		IWL_CMD(MANAGEMENT_ATIM);
		IWL_CMD(MANAGEMENT_DISASSOC);
		IWL_CMD(MANAGEMENT_AUTH);
		IWL_CMD(MANAGEMENT_DEAUTH);
		IWL_CMD(MANAGEMENT_ACTION);
	default:
		return "UNKNOWN";
	}
}
const char *iwl_legacy_get_ctrl_string(int cmd)
{
	switch (cmd) {
		IWL_CMD(CONTROL_BACK_REQ);
		IWL_CMD(CONTROL_BACK);
		IWL_CMD(CONTROL_PSPOLL);
		IWL_CMD(CONTROL_RTS);
		IWL_CMD(CONTROL_CTS);
		IWL_CMD(CONTROL_ACK);
		IWL_CMD(CONTROL_CFEND);
		IWL_CMD(CONTROL_CFENDACK);
	default:
		return "UNKNOWN";
	}
}
void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
{
	memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
	memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
}
/*
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
 * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
 * use debugfs to display the tx/rx statistics.
 * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT or CTRL
 * information is recorded, but DATA packets are still counted because
 * iwl_led.c needs to control the LED blinking based on the number of
 * tx and rx data frames.
 */
void
iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data frames */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
EXPORT_SYMBOL(iwl_legacy_update_stats);
#endif	/* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
static void _iwl_legacy_force_rf_reset(struct iwl_priv *priv)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!iwl_legacy_is_any_associated(priv)) {
		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
		return;
	}
	/*
	 * There is no easy and better way to force reset the radio;
	 * the only known method is switching channel, which forces the
	 * radio to be reset and retuned.
	 * Use an internal short scan (on a single channel) to achieve this.
	 * The driver should reset the radio when a number of consecutive
	 * missed beacons, or any other uCode error condition, is detected.
	 */
	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
	iwl_legacy_internal_short_hw_scan(priv);
}
int iwl_legacy_force_reset(struct iwl_priv *priv, int mode, bool external)
{
	struct iwl_force_reset *force_reset;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return -EINVAL;

	if (mode >= IWL_MAX_FORCE_RESET) {
		IWL_DEBUG_INFO(priv, "invalid reset request.\n");
		return -EINVAL;
	}
	force_reset = &priv->force_reset[mode];
	force_reset->reset_request_count++;

	if (force_reset->last_force_reset_jiffies &&
	    time_after(force_reset->last_force_reset_jiffies +
		       force_reset->reset_duration, jiffies)) {
		IWL_DEBUG_INFO(priv, "force reset rejected\n");
		force_reset->reset_reject_count++;
		return -EAGAIN;
	}

	force_reset->reset_success_count++;
	force_reset->last_force_reset_jiffies = jiffies;
	IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);

	switch (mode) {
	case IWL_RF_RESET:
		_iwl_legacy_force_rf_reset(priv);
		break;
	case IWL_FW_RESET:
		/*
		 * If the request is from an external source (e.g. debugfs),
		 * always perform it, regardless of the module parameter.
		 * If the request is internal (uCode error or driver-detected
		 * failure), the fw_restart module parameter needs to be
		 * checked before performing the firmware reload.
		 */
		if (!external && !priv->cfg->mod_params->restart_fw) {
			IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
				       "module parameter setting\n");
			break;
		}

		IWL_ERR(priv, "On demand firmware reload\n");
		/* Set the FW error flag -- cleared on iwl_down */
		set_bit(STATUS_FW_ERROR, &priv->status);
		wake_up_interruptible(&priv->wait_command_queue);
		/*
		 * Keep the restart process from trying to send host
		 * commands by clearing the INIT status bit
		 */
		clear_bit(STATUS_READY, &priv->status);
		queue_work(priv->workqueue, &priv->restart);
		break;
	}
	return 0;
}
int
iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
			struct ieee80211_vif *vif,
			enum nl80211_iftype newtype, bool newp2p)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	struct iwl_rxon_context *tmp;
	u32 interface_modes;
	int err;

	newtype = ieee80211_iftype_p2p(newtype, newp2p);

	mutex_lock(&priv->mutex);

	if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
		/*
		 * Huh? But wait ... this can maybe happen when
		 * we're in the middle of a firmware restart!
		 */
		err = -EBUSY;
		goto out;
	}

	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;

	if (!(interface_modes & BIT(newtype))) {
		err = -EBUSY;
		goto out;
	}

	if (ctx->exclusive_interface_modes & BIT(newtype)) {
		for_each_context(priv, tmp) {
			if (ctx == tmp)
				continue;

			if (!tmp->vif)
				continue;

			/*
			 * The current mode switch would be exclusive, but
			 * another context is active ... refuse the switch.
			 */
			err = -EBUSY;
			goto out;
		}
	}

	/* success */
	iwl_legacy_teardown_interface(priv, vif, true);
	vif->type = newtype;
	ctx->vif = vif;
	err = iwl_legacy_setup_interface(priv, ctx);
	WARN_ON(err);
	/*
	 * We've switched internally, but submitting to the
	 * device may have failed for some reason. Mask this
	 * error, because otherwise mac80211 will not switch
	 * (and set the interface type back) and we'll be
	 * out of sync with it.
	 */
	err = 0;

out:
	mutex_unlock(&priv->mutex);
	return err;
}
EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
/*
 * On every watchdog tick we check the (latest) time stamp. If it does not
 * change during the timeout period and the queue is not empty, we reset
 * the firmware.
 */
static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
{
	struct iwl_tx_queue *txq = &priv->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(priv->cfg->base_params->wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
				q->id, priv->cfg->base_params->wd_timeout);
		ret = iwl_legacy_force_reset(priv, IWL_FW_RESET, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}
/*
 * Making the watchdog tick a quarter of the timeout assures we will
 * discover a hung queue between timeout and 1.25*timeout.
 */
#define IWL_WD_TICK(timeout) ((timeout) / 4)
/*
 * Watchdog timer callback: we check each tx queue for a stuck condition;
 * if one is hung we reset the firmware. If everything is fine we just
 * rearm the timer.
 */
void iwl_legacy_bg_watchdog(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	timeout = priv->cfg->base_params->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	if (iwl_legacy_is_any_associated(priv)) {
		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
			/* skip as we already checked the command queue */
			if (cnt == priv->cmd_queue)
				continue;
			if (iwl_legacy_check_stuck_queue(priv, cnt))
				return;
		}
	}

	mod_timer(&priv->watchdog, jiffies +
		  msecs_to_jiffies(IWL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
{
	unsigned int timeout = priv->cfg->base_params->wd_timeout;

	if (timeout)
		mod_timer(&priv->watchdog,
			  jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
	else
		del_timer(&priv->watchdog);
}
EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				   priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
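/*
 * Worked example for iwl_legacy_usecs_to_beacons(): with a 100 TU beacon
 * interval (102400 usec) and usec = 250000, quot = 2 and rem = 45200, so
 * the packed value is (2 << beacon_time_tsf_bits) + 45200 (for the usual
 * beacon_time_tsf_bits values the masks do not truncate these numbers).
 */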
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32
iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
				priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
#ifdef CONFIG_PM

int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend
	 * function first, but since iwl_mac_stop() has no knowledge of who
	 * the caller is, it will not call apm_ops.stop() to stop the DMA
	 * operation. Call apm_ops.stop() here to make sure the DMA is stopped.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);
int iwl_legacy_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	iwl_legacy_enable_interrupts(priv);

	if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_resume);
const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);

#endif /* CONFIG_PM */

static void iwl_legacy_update_qos(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!ctx->is_active)
		return;

	ctx->qos_data.def_qos_parm.qos_flags = 0;

	if (ctx->qos_data.qos_active)
		ctx->qos_data.def_qos_parm.qos_flags |=
			QOS_PARAM_FLG_UPDATE_EDCA_MSK;

	if (ctx->ht.enabled)
		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;

	IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
		      ctx->qos_data.qos_active,
		      ctx->qos_data.def_qos_parm.qos_flags);

	iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
			       sizeof(struct iwl_qosparam_cmd),
			       &ctx->qos_data.def_qos_parm, NULL);
}

/**
 * iwl_legacy_mac_config - mac80211 config callback
 */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct iwl_rxon_context *ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed[NUM_IWL_RXON_CTX] = {};

	if (WARN_ON(!priv->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
					channel->hw_value, changed);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "scan active\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * the scan finishes with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !iwl_legacy_is_channel_ibss(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf)) {
				ctx->ht.enabled = conf_is_ht(conf);
				ht_changed[ctx->ctxid] = true;
			}
			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection =
					IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);

			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		if (priv->cfg->ops->legacy->update_bcast_stations)
			ret =
			priv->cfg->ops->legacy->update_bcast_stations(priv);

set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_legacy_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_legacy_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_legacy_set_tx_power(priv, conf->power_level, false);
	}

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	for_each_context(priv, ctx) {
		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
			iwl_legacy_commit_rxon(priv, ctx);
		else
			IWL_DEBUG_INFO(priv,
				"Not re-sending same RXON configuration.\n");
		if (ht_changed[ctx->ctxid])
			iwl_legacy_update_qos(priv, ctx);
	}

out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_config);

void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	/* IBSS can only be the IWL_RXON_CTX_BSS context */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* new association gets rid of the ibss beacon skb */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = NULL;

	priv->timestamp = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_legacy_scan_cancel_timeout(priv, 100);
	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting the association process,
	 * clear the RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	iwl_legacy_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);

static void iwl_legacy_ht_conf(struct iwl_priv *priv,
			struct ieee80211_vif *vif)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_ASSOC(priv, "enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_ASSOC(priv, "leave\n");
}

static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
					   struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ctx->staging.assoc_id = 0;
	iwl_legacy_commit_rxon(priv, ctx);
}

static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return;
	}

	priv->cfg->ops->legacy->post_associate(priv);
}

void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_alive(priv)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}

	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put
		 * something like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);

irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
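
/*
 * Editor's sketch (illustrative only, not part of this file): the 3945/4965
 * probe code is expected to register the handler above as a shared interrupt,
 * with the iwl_priv pointer as the cookie that iwl_legacy_isr() reads back
 * out of 'data'.  The exact call site lives in the chip-specific front ends;
 * this only shows the pattern.
 */
#if 0
	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err)
		IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
#endif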

/**
 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
 * function.
 */
void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
			       struct ieee80211_tx_info *info,
			       __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].flags &
		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
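
/*
 * Editor's sketch (illustrative only, not part of this file): a simplified
 * view of how a chip-specific TX path would use the helper above while
 * filling in a TX command.  'tx_cmd' and 'hdr' are hypothetical locals of
 * the caller, not names taken from this file.
 */
#if 0
	__le32 tx_flags = tx_cmd->tx_flags;
	__le16 fc = hdr->frame_control;

	iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
	tx_cmd->tx_flags = tx_flags;
#endif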