/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"
MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
/*
 * When bt_coex_active is true, uCode will do kill/defer
 * every time the priority line is asserted (BT is sending signals on the
 * priority line in the PCIx).
 * When bt_coex_active is false, uCode will ignore the BT activity and
 * perform the normal operation.
 *
 * Users might experience transmit issues on some platforms due to this
 * WiFi/BT co-existence problem. The possible symptoms are:
 *	Able to scan and find all the available APs
 *	Not able to associate with any AP
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
static bool bt_coex_active = true;
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
u32 iwlegacy_debug_level;
EXPORT_SYMBOL(iwlegacy_debug_level);

const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwlegacy_bcast_addr);
/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
{
	struct iwl_priv *priv;
	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
				cfg->ops->ieee80211_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_legacy_alloc_all);
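
/*
 * Call-site sketch (illustrative only, assumed rather than taken from this
 * file): the 3945/4965 bus probe paths are expected to use the helper
 * roughly as follows, keeping error handling and device setup on their side:
 *
 *	hw = iwl_legacy_alloc_all(cfg);
 *	if (!hw)
 *		goto out;
 *	priv = hw->priv;
 *	SET_IEEE80211_DEV(hw, &pdev->dev);
 */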
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
			      struct ieee80211_sta_ht_cap *ht_info,
			      enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
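
/*
 * Illustrative result (added note, not original code): on a 2x2 device in a
 * band where HT40 is allowed, the code above ends up with rx_mask[0] and
 * rx_mask[1] set to 0xFF (MCS 0-15), rx_mask[4] bit 0 set (MCS 32) and
 * rx_highest = 2 chains * 150 Mbps = 300 Mbps; a 20 MHz-only band instead
 * reports 2 * 72 = 144 Mbps.
 */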
/**
 * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on eeprom
 */
int iwl_legacy_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		if (!iwl_legacy_is_channel_valid(ch))
			continue;

		sband = &priv->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (iwl_legacy_is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	priv->tx_power_device_lmt = max_tx_power;
	priv->tx_power_user_lmt = max_tx_power;
	priv->tx_power_next = max_tx_power;

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			   priv->pci_dev->device,
			   priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_init_geos);
/**
 * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
 */
void iwl_legacy_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwl_legacy_free_geos);
static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
				     enum ieee80211_band band,
				     u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_legacy_get_channel_info(priv, band, channel);
	if (!iwl_legacy_is_channel_valid(ch_info))
		return false;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return false;
}
bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * because the bit will not be set for the pure 40MHz case.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_legacy_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
static u16
iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device.
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in IBSS case, we'll beacon too much)
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}
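
/*
 * Worked example (added illustration): with max_beacon_val = 4096 TU and a
 * peer beacon interval of 10000 TU, beacon_factor = (10000 + 4096) / 4096 = 3
 * and new_val = 10000 / 3 = 3333 TU, so the device wakes every 3333 TU and
 * roughly every third wakeup lines up with a beacon.
 */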
int
iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 *	 for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
			priv->hw_params.max_beacon_itrvl * TIME_UNIT);
	ctx->timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				sizeof(ctx->timing), &ctx->timing);
}
EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
411 iwl_legacy_set_rxon_hwcrypto(struct iwl_priv
*priv
,
412 struct iwl_rxon_context
*ctx
,
415 struct iwl_legacy_rxon_cmd
*rxon
= &ctx
->staging
;
418 rxon
->filter_flags
&= ~RXON_FILTER_DIS_DECRYPT_MSK
;
420 rxon
->filter_flags
|= RXON_FILTER_DIS_DECRYPT_MSK
;
423 EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto
);
425 /* validate RXON structure is valid */
427 iwl_legacy_check_rxon_cmd(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
429 struct iwl_legacy_rxon_cmd
*rxon
= &ctx
->staging
;
432 if (rxon
->flags
& RXON_FLG_BAND_24G_MSK
) {
433 if (rxon
->flags
& RXON_FLG_TGJ_NARROW_BAND_MSK
) {
434 IWL_WARN(priv
, "check 2.4G: wrong narrow\n");
437 if (rxon
->flags
& RXON_FLG_RADAR_DETECT_MSK
) {
438 IWL_WARN(priv
, "check 2.4G: wrong radar\n");
442 if (!(rxon
->flags
& RXON_FLG_SHORT_SLOT_MSK
)) {
443 IWL_WARN(priv
, "check 5.2G: not short slot!\n");
446 if (rxon
->flags
& RXON_FLG_CCK_MSK
) {
447 IWL_WARN(priv
, "check 5.2G: CCK!\n");
451 if ((rxon
->node_addr
[0] | rxon
->bssid_addr
[0]) & 0x1) {
452 IWL_WARN(priv
, "mac/bssid mcast!\n");
456 /* make sure basic rates 6Mbps and 1Mbps are supported */
457 if ((rxon
->ofdm_basic_rates
& IWL_RATE_6M_MASK
) == 0 &&
458 (rxon
->cck_basic_rates
& IWL_RATE_1M_MASK
) == 0) {
459 IWL_WARN(priv
, "neither 1 nor 6 are basic\n");
463 if (le16_to_cpu(rxon
->assoc_id
) > 2007) {
464 IWL_WARN(priv
, "aid > 2007\n");
468 if ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
))
469 == (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
)) {
470 IWL_WARN(priv
, "CCK and short slot\n");
474 if ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
))
475 == (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
)) {
476 IWL_WARN(priv
, "CCK and auto detect");
480 if ((rxon
->flags
& (RXON_FLG_AUTO_DETECT_MSK
|
481 RXON_FLG_TGG_PROTECT_MSK
)) ==
482 RXON_FLG_TGG_PROTECT_MSK
) {
483 IWL_WARN(priv
, "TGg but no auto-detect\n");
488 IWL_WARN(priv
, "Tuning to channel %d\n",
489 le16_to_cpu(rxon
->channel
));
492 IWL_ERR(priv
, "Invalid RXON\n");
497 EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd
);
500 * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
501 * @priv: staging_rxon is compared to active_rxon
503 * If the RXON structure is changing enough to require a new tune,
504 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
505 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
507 int iwl_legacy_full_rxon_required(struct iwl_priv
*priv
,
508 struct iwl_rxon_context
*ctx
)
510 const struct iwl_legacy_rxon_cmd
*staging
= &ctx
->staging
;
511 const struct iwl_legacy_rxon_cmd
*active
= &ctx
->active
;
515 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
519 #define CHK_NEQ(c1, c2) \
520 if ((c1) != (c2)) { \
521 IWL_DEBUG_INFO(priv, "need full RXON - " \
522 #c1 " != " #c2 " - %d != %d\n", \
527 /* These items are only settable from the full RXON command */
528 CHK(!iwl_legacy_is_associated_ctx(ctx
));
529 CHK(compare_ether_addr(staging
->bssid_addr
, active
->bssid_addr
));
530 CHK(compare_ether_addr(staging
->node_addr
, active
->node_addr
));
531 CHK(compare_ether_addr(staging
->wlap_bssid_addr
,
532 active
->wlap_bssid_addr
));
533 CHK_NEQ(staging
->dev_type
, active
->dev_type
);
534 CHK_NEQ(staging
->channel
, active
->channel
);
535 CHK_NEQ(staging
->air_propagation
, active
->air_propagation
);
536 CHK_NEQ(staging
->ofdm_ht_single_stream_basic_rates
,
537 active
->ofdm_ht_single_stream_basic_rates
);
538 CHK_NEQ(staging
->ofdm_ht_dual_stream_basic_rates
,
539 active
->ofdm_ht_dual_stream_basic_rates
);
540 CHK_NEQ(staging
->assoc_id
, active
->assoc_id
);
542 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
543 * be updated with the RXON_ASSOC command -- however only some
544 * flag transitions are allowed using RXON_ASSOC */
546 /* Check if we are not switching bands */
547 CHK_NEQ(staging
->flags
& RXON_FLG_BAND_24G_MSK
,
548 active
->flags
& RXON_FLG_BAND_24G_MSK
);
550 /* Check if we are switching association toggle */
551 CHK_NEQ(staging
->filter_flags
& RXON_FILTER_ASSOC_MSK
,
552 active
->filter_flags
& RXON_FILTER_ASSOC_MSK
);
559 EXPORT_SYMBOL(iwl_legacy_full_rxon_required
);
u8
iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx)
{
	/*
	 * Assign the lowest rate -- should really get this from
	 * the beacon skb from mac80211.
	 */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
575 static void _iwl_legacy_set_rxon_ht(struct iwl_priv
*priv
,
576 struct iwl_ht_config
*ht_conf
,
577 struct iwl_rxon_context
*ctx
)
579 struct iwl_legacy_rxon_cmd
*rxon
= &ctx
->staging
;
581 if (!ctx
->ht
.enabled
) {
582 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
583 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
|
584 RXON_FLG_HT40_PROT_MSK
|
585 RXON_FLG_HT_PROT_MSK
);
589 rxon
->flags
|= cpu_to_le32(ctx
->ht
.protection
<<
590 RXON_FLG_HT_OPERATING_MODE_POS
);
592 /* Set up channel bandwidth:
593 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
594 /* clear the HT channel mode before set the mode */
595 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
596 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
597 if (iwl_legacy_is_ht40_tx_allowed(priv
, ctx
, NULL
)) {
599 if (ctx
->ht
.protection
==
600 IEEE80211_HT_OP_MODE_PROTECTION_20MHZ
) {
601 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_PURE_40
;
602 /* Note: control channel is opposite of extension channel */
603 switch (ctx
->ht
.extension_chan_offset
) {
604 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
606 ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
608 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
610 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
614 /* Note: control channel is opposite of extension channel */
615 switch (ctx
->ht
.extension_chan_offset
) {
616 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
618 ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
619 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
621 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
623 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
624 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
626 case IEEE80211_HT_PARAM_CHA_SEC_NONE
:
628 /* channel location only valid if in Mixed mode */
630 "invalid extension channel offset\n");
635 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_LEGACY
;
638 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
639 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
, ctx
);
641 IWL_DEBUG_ASSOC(priv
, "rxon flags 0x%X operation mode :0x%X "
642 "extension channel offset 0x%x\n",
643 le32_to_cpu(rxon
->flags
), ctx
->ht
.protection
,
644 ctx
->ht
.extension_chan_offset
);
647 void iwl_legacy_set_rxon_ht(struct iwl_priv
*priv
, struct iwl_ht_config
*ht_conf
)
649 struct iwl_rxon_context
*ctx
;
651 for_each_context(priv
, ctx
)
652 _iwl_legacy_set_rxon_ht(priv
, ht_conf
, ctx
);
654 EXPORT_SYMBOL(iwl_legacy_set_rxon_ht
);
656 /* Return valid, unused, channel for a passive scan to reset the RF */
657 u8
iwl_legacy_get_single_channel_number(struct iwl_priv
*priv
,
658 enum ieee80211_band band
)
660 const struct iwl_channel_info
*ch_info
;
664 struct iwl_rxon_context
*ctx
;
666 if (band
== IEEE80211_BAND_5GHZ
) {
668 max
= priv
->channel_count
;
674 for (i
= min
; i
< max
; i
++) {
677 for_each_context(priv
, ctx
) {
678 busy
= priv
->channel_info
[i
].channel
==
679 le16_to_cpu(ctx
->staging
.channel
);
687 channel
= priv
->channel_info
[i
].channel
;
688 ch_info
= iwl_legacy_get_channel_info(priv
, band
, channel
);
689 if (iwl_legacy_is_channel_valid(ch_info
))
695 EXPORT_SYMBOL(iwl_legacy_get_single_channel_number
);
698 * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
699 * @ch: requested channel as a pointer to struct ieee80211_channel
701 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
702 * in the staging RXON flag structure based on the ch->band
705 iwl_legacy_set_rxon_channel(struct iwl_priv
*priv
, struct ieee80211_channel
*ch
,
706 struct iwl_rxon_context
*ctx
)
708 enum ieee80211_band band
= ch
->band
;
709 u16 channel
= ch
->hw_value
;
711 if ((le16_to_cpu(ctx
->staging
.channel
) == channel
) &&
712 (priv
->band
== band
))
715 ctx
->staging
.channel
= cpu_to_le16(channel
);
716 if (band
== IEEE80211_BAND_5GHZ
)
717 ctx
->staging
.flags
&= ~RXON_FLG_BAND_24G_MSK
;
719 ctx
->staging
.flags
|= RXON_FLG_BAND_24G_MSK
;
723 IWL_DEBUG_INFO(priv
, "Staging channel set to %d [%d]\n", channel
, band
);
727 EXPORT_SYMBOL(iwl_legacy_set_rxon_channel
);
729 void iwl_legacy_set_flags_for_band(struct iwl_priv
*priv
,
730 struct iwl_rxon_context
*ctx
,
731 enum ieee80211_band band
,
732 struct ieee80211_vif
*vif
)
734 if (band
== IEEE80211_BAND_5GHZ
) {
735 ctx
->staging
.flags
&=
736 ~(RXON_FLG_BAND_24G_MSK
| RXON_FLG_AUTO_DETECT_MSK
738 ctx
->staging
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
740 /* Copied from iwl_post_associate() */
741 if (vif
&& vif
->bss_conf
.use_short_slot
)
742 ctx
->staging
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
744 ctx
->staging
.flags
&= ~RXON_FLG_SHORT_SLOT_MSK
;
746 ctx
->staging
.flags
|= RXON_FLG_BAND_24G_MSK
;
747 ctx
->staging
.flags
|= RXON_FLG_AUTO_DETECT_MSK
;
748 ctx
->staging
.flags
&= ~RXON_FLG_CCK_MSK
;
751 EXPORT_SYMBOL(iwl_legacy_set_flags_for_band
);
754 * initialize rxon structure with default values from eeprom
756 void iwl_legacy_connection_init_rx_config(struct iwl_priv
*priv
,
757 struct iwl_rxon_context
*ctx
)
759 const struct iwl_channel_info
*ch_info
;
761 memset(&ctx
->staging
, 0, sizeof(ctx
->staging
));
764 ctx
->staging
.dev_type
= ctx
->unused_devtype
;
766 switch (ctx
->vif
->type
) {
768 case NL80211_IFTYPE_STATION
:
769 ctx
->staging
.dev_type
= ctx
->station_devtype
;
770 ctx
->staging
.filter_flags
= RXON_FILTER_ACCEPT_GRP_MSK
;
773 case NL80211_IFTYPE_ADHOC
:
774 ctx
->staging
.dev_type
= ctx
->ibss_devtype
;
775 ctx
->staging
.flags
= RXON_FLG_SHORT_PREAMBLE_MSK
;
776 ctx
->staging
.filter_flags
= RXON_FILTER_BCON_AWARE_MSK
|
777 RXON_FILTER_ACCEPT_GRP_MSK
;
781 IWL_ERR(priv
, "Unsupported interface type %d\n",
787 /* TODO: Figure out when short_preamble would be set and cache from
789 if (!hw_to_local(priv
->hw
)->short_preamble
)
790 ctx
->staging
.flags
&= ~RXON_FLG_SHORT_PREAMBLE_MSK
;
792 ctx
->staging
.flags
|= RXON_FLG_SHORT_PREAMBLE_MSK
;
795 ch_info
= iwl_legacy_get_channel_info(priv
, priv
->band
,
796 le16_to_cpu(ctx
->active
.channel
));
799 ch_info
= &priv
->channel_info
[0];
801 ctx
->staging
.channel
= cpu_to_le16(ch_info
->channel
);
802 priv
->band
= ch_info
->band
;
804 iwl_legacy_set_flags_for_band(priv
, ctx
, priv
->band
, ctx
->vif
);
806 ctx
->staging
.ofdm_basic_rates
=
807 (IWL_OFDM_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
808 ctx
->staging
.cck_basic_rates
=
809 (IWL_CCK_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
811 /* clear both MIX and PURE40 mode flag */
812 ctx
->staging
.flags
&= ~(RXON_FLG_CHANNEL_MODE_MIXED
|
813 RXON_FLG_CHANNEL_MODE_PURE_40
);
815 memcpy(ctx
->staging
.node_addr
, ctx
->vif
->addr
, ETH_ALEN
);
817 ctx
->staging
.ofdm_ht_single_stream_basic_rates
= 0xff;
818 ctx
->staging
.ofdm_ht_dual_stream_basic_rates
= 0xff;
820 EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config
);
822 void iwl_legacy_set_rate(struct iwl_priv
*priv
)
824 const struct ieee80211_supported_band
*hw
= NULL
;
825 struct ieee80211_rate
*rate
;
826 struct iwl_rxon_context
*ctx
;
829 hw
= iwl_get_hw_mode(priv
, priv
->band
);
831 IWL_ERR(priv
, "Failed to set rate: unable to get hw mode\n");
835 priv
->active_rate
= 0;
837 for (i
= 0; i
< hw
->n_bitrates
; i
++) {
838 rate
= &(hw
->bitrates
[i
]);
839 if (rate
->hw_value
< IWL_RATE_COUNT_LEGACY
)
840 priv
->active_rate
|= (1 << rate
->hw_value
);
843 IWL_DEBUG_RATE(priv
, "Set active_rate = %0x\n", priv
->active_rate
);
845 for_each_context(priv
, ctx
) {
846 ctx
->staging
.cck_basic_rates
=
847 (IWL_CCK_BASIC_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
849 ctx
->staging
.ofdm_basic_rates
=
850 (IWL_OFDM_BASIC_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
853 EXPORT_SYMBOL(iwl_legacy_set_rate
);
void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		ieee80211_chswitch_done(ctx->vif, is_success);
}
EXPORT_SYMBOL(iwl_legacy_chswitch_done);
867 void iwl_legacy_rx_csa(struct iwl_priv
*priv
, struct iwl_rx_mem_buffer
*rxb
)
869 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
870 struct iwl_csa_notification
*csa
= &(pkt
->u
.csa_notif
);
872 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
873 struct iwl_legacy_rxon_cmd
*rxon
= (void *)&ctx
->active
;
875 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING
, &priv
->status
))
878 if (!le32_to_cpu(csa
->status
) && csa
->channel
== priv
->switch_channel
) {
879 rxon
->channel
= csa
->channel
;
880 ctx
->staging
.channel
= csa
->channel
;
881 IWL_DEBUG_11H(priv
, "CSA notif: channel %d\n",
882 le16_to_cpu(csa
->channel
));
883 iwl_legacy_chswitch_done(priv
, true);
885 IWL_ERR(priv
, "CSA notif (fail) : channel %d\n",
886 le16_to_cpu(csa
->channel
));
887 iwl_legacy_chswitch_done(priv
, false);
890 EXPORT_SYMBOL(iwl_legacy_rx_csa
);
892 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
893 void iwl_legacy_print_rx_config_cmd(struct iwl_priv
*priv
,
894 struct iwl_rxon_context
*ctx
)
896 struct iwl_legacy_rxon_cmd
*rxon
= &ctx
->staging
;
898 IWL_DEBUG_RADIO(priv
, "RX CONFIG:\n");
899 iwl_print_hex_dump(priv
, IWL_DL_RADIO
, (u8
*) rxon
, sizeof(*rxon
));
900 IWL_DEBUG_RADIO(priv
, "u16 channel: 0x%x\n",
901 le16_to_cpu(rxon
->channel
));
902 IWL_DEBUG_RADIO(priv
, "u32 flags: 0x%08X\n", le32_to_cpu(rxon
->flags
));
903 IWL_DEBUG_RADIO(priv
, "u32 filter_flags: 0x%08x\n",
904 le32_to_cpu(rxon
->filter_flags
));
905 IWL_DEBUG_RADIO(priv
, "u8 dev_type: 0x%x\n", rxon
->dev_type
);
906 IWL_DEBUG_RADIO(priv
, "u8 ofdm_basic_rates: 0x%02x\n",
907 rxon
->ofdm_basic_rates
);
908 IWL_DEBUG_RADIO(priv
, "u8 cck_basic_rates: 0x%02x\n",
909 rxon
->cck_basic_rates
);
910 IWL_DEBUG_RADIO(priv
, "u8[6] node_addr: %pM\n", rxon
->node_addr
);
911 IWL_DEBUG_RADIO(priv
, "u8[6] bssid_addr: %pM\n", rxon
->bssid_addr
);
912 IWL_DEBUG_RADIO(priv
, "u16 assoc_id: 0x%x\n",
913 le16_to_cpu(rxon
->assoc_id
));
915 EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd
);
918 * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
920 void iwl_legacy_irq_handle_error(struct iwl_priv
*priv
)
922 /* Set the FW error flag -- cleared on iwl_down */
923 set_bit(STATUS_FW_ERROR
, &priv
->status
);
925 /* Cancel currently queued command. */
926 clear_bit(STATUS_HCMD_ACTIVE
, &priv
->status
);
928 IWL_ERR(priv
, "Loaded firmware version: %s\n",
929 priv
->hw
->wiphy
->fw_version
);
931 priv
->cfg
->ops
->lib
->dump_nic_error_log(priv
);
932 if (priv
->cfg
->ops
->lib
->dump_fh
)
933 priv
->cfg
->ops
->lib
->dump_fh(priv
, NULL
, false);
934 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
935 if (iwl_legacy_get_debug_level(priv
) & IWL_DL_FW_ERRORS
)
936 iwl_legacy_print_rx_config_cmd(priv
,
937 &priv
->contexts
[IWL_RXON_CTX_BSS
]);
940 wake_up_interruptible(&priv
->wait_command_queue
);
942 /* Keep the restart process from trying to send host
943 * commands by clearing the INIT status bit */
944 clear_bit(STATUS_READY
, &priv
->status
);
946 if (!test_bit(STATUS_EXIT_PENDING
, &priv
->status
)) {
947 IWL_DEBUG(priv
, IWL_DL_FW_ERRORS
,
948 "Restarting adapter due to uCode error.\n");
950 if (priv
->cfg
->mod_params
->restart_fw
)
951 queue_work(priv
->workqueue
, &priv
->restart
);
954 EXPORT_SYMBOL(iwl_legacy_irq_handle_error
);
956 static int iwl_legacy_apm_stop_master(struct iwl_priv
*priv
)
960 /* stop device's busmaster DMA activity */
961 iwl_legacy_set_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_STOP_MASTER
);
963 ret
= iwl_poll_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_MASTER_DISABLED
,
964 CSR_RESET_REG_FLAG_MASTER_DISABLED
, 100);
966 IWL_WARN(priv
, "Master Disable Timed Out, 100 usec\n");
968 IWL_DEBUG_INFO(priv
, "stop master\n");
973 void iwl_legacy_apm_stop(struct iwl_priv
*priv
)
975 IWL_DEBUG_INFO(priv
, "Stop card, put in low power state\n");
977 /* Stop device's DMA activity */
978 iwl_legacy_apm_stop_master(priv
);
980 /* Reset the entire device */
981 iwl_legacy_set_bit(priv
, CSR_RESET
, CSR_RESET_REG_FLAG_SW_RESET
);
986 * Clear "initialization complete" bit to move adapter from
987 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
989 iwl_legacy_clear_bit(priv
, CSR_GP_CNTRL
,
990 CSR_GP_CNTRL_REG_FLAG_INIT_DONE
);
992 EXPORT_SYMBOL(iwl_legacy_apm_stop
);
996 * Start up NIC's basic functionality after it has been reset
997 * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
998 * NOTE: This does not load uCode nor start the embedded processor
1000 int iwl_legacy_apm_init(struct iwl_priv
*priv
)
1005 IWL_DEBUG_INFO(priv
, "Init card's basic functions\n");
1008 * Use "set_bit" below rather than "write", to preserve any hardware
1009 * bits already set by default after reset.
1012 /* Disable L0S exit timer (platform NMI Work/Around) */
1013 iwl_legacy_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1014 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER
);
1017 * Disable L0s without affecting L1;
1018 * don't wait for ICH L0s (ICH bug W/A)
1020 iwl_legacy_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1021 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX
);
1023 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1024 iwl_legacy_set_bit(priv
, CSR_DBG_HPET_MEM_REG
,
1025 CSR_DBG_HPET_MEM_REG_VAL
);
1028 * Enable HAP INTA (interrupt from management bus) to
1029 * wake device's PCI Express link L1a -> L0s
1030 * NOTE: This is no-op for 3945 (non-existent bit)
1032 iwl_legacy_set_bit(priv
, CSR_HW_IF_CONFIG_REG
,
1033 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A
);
1036 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1037 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1038 * If so (likely), disable L0S, so device moves directly L0->L1;
1039 * costs negligible amount of power savings.
1040 * If not (unlikely), enable L0S, so there is at least some
1041 * power savings, even without L1.
1043 if (priv
->cfg
->base_params
->set_l0s
) {
1044 lctl
= iwl_legacy_pcie_link_ctl(priv
);
1045 if ((lctl
& PCI_CFG_LINK_CTRL_VAL_L1_EN
) ==
1046 PCI_CFG_LINK_CTRL_VAL_L1_EN
) {
1047 /* L1-ASPM enabled; disable(!) L0S */
1048 iwl_legacy_set_bit(priv
, CSR_GIO_REG
,
1049 CSR_GIO_REG_VAL_L0S_ENABLED
);
1050 IWL_DEBUG_POWER(priv
, "L1 Enabled; Disabling L0S\n");
1052 /* L1-ASPM disabled; enable(!) L0S */
1053 iwl_legacy_clear_bit(priv
, CSR_GIO_REG
,
1054 CSR_GIO_REG_VAL_L0S_ENABLED
);
1055 IWL_DEBUG_POWER(priv
, "L1 Disabled; Enabling L0S\n");
1059 /* Configure analog phase-lock-loop before activating to D0A */
1060 if (priv
->cfg
->base_params
->pll_cfg_val
)
1061 iwl_legacy_set_bit(priv
, CSR_ANA_PLL_CFG
,
1062 priv
->cfg
->base_params
->pll_cfg_val
);
1065 * Set "initialization complete" bit to move adapter from
1066 * D0U* --> D0A* (powered-up active) state.
1068 iwl_legacy_set_bit(priv
, CSR_GP_CNTRL
, CSR_GP_CNTRL_REG_FLAG_INIT_DONE
);
1071 * Wait for clock stabilization; once stabilized, access to
1072 * device-internal resources is supported, e.g. iwl_legacy_write_prph()
1073 * and accesses to uCode SRAM.
1075 ret
= iwl_poll_bit(priv
, CSR_GP_CNTRL
,
1076 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
,
1077 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
, 25000);
1079 IWL_DEBUG_INFO(priv
, "Failed to init the card\n");
1084 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1085 * BSM (Boostrap State Machine) is only in 3945 and 4965.
1087 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1088 * do not disable clocks. This preserves any hardware bits already
1089 * set by default in "CLK_CTRL_REG" after reset.
1091 if (priv
->cfg
->base_params
->use_bsm
)
1092 iwl_legacy_write_prph(priv
, APMG_CLK_EN_REG
,
1093 APMG_CLK_VAL_DMA_CLK_RQT
| APMG_CLK_VAL_BSM_CLK_RQT
);
1095 iwl_legacy_write_prph(priv
, APMG_CLK_EN_REG
,
1096 APMG_CLK_VAL_DMA_CLK_RQT
);
1099 /* Disable L1-Active */
1100 iwl_legacy_set_bits_prph(priv
, APMG_PCIDEV_STT_REG
,
1101 APMG_PCIDEV_STT_VAL_L1_ACT_DIS
);
1106 EXPORT_SYMBOL(iwl_legacy_apm_init
);
1109 int iwl_legacy_set_tx_power(struct iwl_priv
*priv
, s8 tx_power
, bool force
)
1114 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
1116 lockdep_assert_held(&priv
->mutex
);
1118 if (priv
->tx_power_user_lmt
== tx_power
&& !force
)
1121 if (!priv
->cfg
->ops
->lib
->send_tx_power
)
1124 /* 0 dBm mean 1 milliwatt */
1127 "Requested user TXPOWER %d below 1 mW.\n",
1132 if (tx_power
> priv
->tx_power_device_lmt
) {
1134 "Requested user TXPOWER %d above upper limit %d.\n",
1135 tx_power
, priv
->tx_power_device_lmt
);
1139 if (!iwl_legacy_is_ready_rf(priv
))
1142 /* scan complete and commit_rxon use tx_power_next value,
1143 * it always need to be updated for newest request */
1144 priv
->tx_power_next
= tx_power
;
1146 /* do not set tx power when scanning or channel changing */
1147 defer
= test_bit(STATUS_SCANNING
, &priv
->status
) ||
1148 memcmp(&ctx
->active
, &ctx
->staging
, sizeof(ctx
->staging
));
1149 if (defer
&& !force
) {
1150 IWL_DEBUG_INFO(priv
, "Deferring tx power set\n");
1154 prev_tx_power
= priv
->tx_power_user_lmt
;
1155 priv
->tx_power_user_lmt
= tx_power
;
1157 ret
= priv
->cfg
->ops
->lib
->send_tx_power(priv
);
1159 /* if fail to set tx_power, restore the orig. tx power */
1161 priv
->tx_power_user_lmt
= prev_tx_power
;
1162 priv
->tx_power_next
= prev_tx_power
;
1166 EXPORT_SYMBOL(iwl_legacy_set_tx_power
);
1168 void iwl_legacy_send_bt_config(struct iwl_priv
*priv
)
1170 struct iwl_bt_cmd bt_cmd
= {
1171 .lead_time
= BT_LEAD_TIME_DEF
,
1172 .max_kill
= BT_MAX_KILL_DEF
,
1177 if (!bt_coex_active
)
1178 bt_cmd
.flags
= BT_COEX_DISABLE
;
1180 bt_cmd
.flags
= BT_COEX_ENABLE
;
1182 IWL_DEBUG_INFO(priv
, "BT coex %s\n",
1183 (bt_cmd
.flags
== BT_COEX_DISABLE
) ? "disable" : "active");
1185 if (iwl_legacy_send_cmd_pdu(priv
, REPLY_BT_CONFIG
,
1186 sizeof(struct iwl_bt_cmd
), &bt_cmd
))
1187 IWL_ERR(priv
, "failed to send BT Coex Config\n");
1189 EXPORT_SYMBOL(iwl_legacy_send_bt_config
);
1191 int iwl_legacy_send_statistics_request(struct iwl_priv
*priv
, u8 flags
, bool clear
)
1193 struct iwl_statistics_cmd statistics_cmd
= {
1194 .configuration_flags
=
1195 clear
? IWL_STATS_CONF_CLEAR_STATS
: 0,
1198 if (flags
& CMD_ASYNC
)
1199 return iwl_legacy_send_cmd_pdu_async(priv
, REPLY_STATISTICS_CMD
,
1200 sizeof(struct iwl_statistics_cmd
),
1201 &statistics_cmd
, NULL
);
1203 return iwl_legacy_send_cmd_pdu(priv
, REPLY_STATISTICS_CMD
,
1204 sizeof(struct iwl_statistics_cmd
),
1207 EXPORT_SYMBOL(iwl_legacy_send_statistics_request
);
1209 void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv
*priv
,
1210 struct iwl_rx_mem_buffer
*rxb
)
1212 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
1213 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1214 struct iwl_sleep_notification
*sleep
= &(pkt
->u
.sleep_notif
);
1215 IWL_DEBUG_RX(priv
, "sleep mode: %d, src: %d\n",
1216 sleep
->pm_sleep_mode
, sleep
->pm_wakeup_src
);
1219 EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif
);
1221 void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv
*priv
,
1222 struct iwl_rx_mem_buffer
*rxb
)
1224 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1225 u32 len
= le32_to_cpu(pkt
->len_n_flags
) & FH_RSCSR_FRAME_SIZE_MSK
;
1226 IWL_DEBUG_RADIO(priv
, "Dumping %d bytes of unhandled "
1227 "notification for %s:\n", len
,
1228 iwl_legacy_get_cmd_string(pkt
->hdr
.cmd
));
1229 iwl_print_hex_dump(priv
, IWL_DL_RADIO
, pkt
->u
.raw
, len
);
1231 EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif
);
1233 void iwl_legacy_rx_reply_error(struct iwl_priv
*priv
,
1234 struct iwl_rx_mem_buffer
*rxb
)
1236 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1238 IWL_ERR(priv
, "Error Reply type 0x%08X cmd %s (0x%02X) "
1239 "seq 0x%04X ser 0x%08X\n",
1240 le32_to_cpu(pkt
->u
.err_resp
.error_type
),
1241 iwl_legacy_get_cmd_string(pkt
->u
.err_resp
.cmd_id
),
1242 pkt
->u
.err_resp
.cmd_id
,
1243 le16_to_cpu(pkt
->u
.err_resp
.bad_cmd_seq_num
),
1244 le32_to_cpu(pkt
->u
.err_resp
.error_info
));
1246 EXPORT_SYMBOL(iwl_legacy_rx_reply_error
);
1248 void iwl_legacy_clear_isr_stats(struct iwl_priv
*priv
)
1250 memset(&priv
->isr_stats
, 0, sizeof(priv
->isr_stats
));
1253 int iwl_legacy_mac_conf_tx(struct ieee80211_hw
*hw
, u16 queue
,
1254 const struct ieee80211_tx_queue_params
*params
)
1256 struct iwl_priv
*priv
= hw
->priv
;
1257 struct iwl_rxon_context
*ctx
;
1258 unsigned long flags
;
1261 IWL_DEBUG_MAC80211(priv
, "enter\n");
1263 if (!iwl_legacy_is_ready_rf(priv
)) {
1264 IWL_DEBUG_MAC80211(priv
, "leave - RF not ready\n");
1268 if (queue
>= AC_NUM
) {
1269 IWL_DEBUG_MAC80211(priv
, "leave - queue >= AC_NUM %d\n", queue
);
1273 q
= AC_NUM
- 1 - queue
;
1275 spin_lock_irqsave(&priv
->lock
, flags
);
1277 for_each_context(priv
, ctx
) {
1278 ctx
->qos_data
.def_qos_parm
.ac
[q
].cw_min
=
1279 cpu_to_le16(params
->cw_min
);
1280 ctx
->qos_data
.def_qos_parm
.ac
[q
].cw_max
=
1281 cpu_to_le16(params
->cw_max
);
1282 ctx
->qos_data
.def_qos_parm
.ac
[q
].aifsn
= params
->aifs
;
1283 ctx
->qos_data
.def_qos_parm
.ac
[q
].edca_txop
=
1284 cpu_to_le16((params
->txop
* 32));
1286 ctx
->qos_data
.def_qos_parm
.ac
[q
].reserved1
= 0;
1289 spin_unlock_irqrestore(&priv
->lock
, flags
);
1291 IWL_DEBUG_MAC80211(priv
, "leave\n");
1294 EXPORT_SYMBOL(iwl_legacy_mac_conf_tx
);
1296 int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw
*hw
)
1298 struct iwl_priv
*priv
= hw
->priv
;
1300 return priv
->ibss_manager
== IWL_IBSS_MANAGER
;
1302 EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon
);
1305 iwl_legacy_set_mode(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
1307 iwl_legacy_connection_init_rx_config(priv
, ctx
);
1309 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
1310 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
, ctx
);
1312 return iwl_legacy_commit_rxon(priv
, ctx
);
1315 static int iwl_legacy_setup_interface(struct iwl_priv
*priv
,
1316 struct iwl_rxon_context
*ctx
)
1318 struct ieee80211_vif
*vif
= ctx
->vif
;
1321 lockdep_assert_held(&priv
->mutex
);
1324 * This variable will be correct only when there's just
1325 * a single context, but all code using it is for hardware
1326 * that supports only one context.
1328 priv
->iw_mode
= vif
->type
;
1330 ctx
->is_active
= true;
1332 err
= iwl_legacy_set_mode(priv
, ctx
);
1334 if (!ctx
->always_active
)
1335 ctx
->is_active
= false;
1343 iwl_legacy_mac_add_interface(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
)
1345 struct iwl_priv
*priv
= hw
->priv
;
1346 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
1347 struct iwl_rxon_context
*tmp
, *ctx
= NULL
;
1350 IWL_DEBUG_MAC80211(priv
, "enter: type %d, addr %pM\n",
1351 vif
->type
, vif
->addr
);
1353 mutex_lock(&priv
->mutex
);
1355 if (!iwl_legacy_is_ready_rf(priv
)) {
1356 IWL_WARN(priv
, "Try to add interface when device not ready\n");
1361 for_each_context(priv
, tmp
) {
1362 u32 possible_modes
=
1363 tmp
->interface_modes
| tmp
->exclusive_interface_modes
;
1366 /* check if this busy context is exclusive */
1367 if (tmp
->exclusive_interface_modes
&
1368 BIT(tmp
->vif
->type
)) {
1375 if (!(possible_modes
& BIT(vif
->type
)))
1378 /* have maybe usable context w/o interface */
1388 vif_priv
->ctx
= ctx
;
1391 err
= iwl_legacy_setup_interface(priv
, ctx
);
1396 priv
->iw_mode
= NL80211_IFTYPE_STATION
;
1398 mutex_unlock(&priv
->mutex
);
1400 IWL_DEBUG_MAC80211(priv
, "leave\n");
1403 EXPORT_SYMBOL(iwl_legacy_mac_add_interface
);
1405 static void iwl_legacy_teardown_interface(struct iwl_priv
*priv
,
1406 struct ieee80211_vif
*vif
,
1409 struct iwl_rxon_context
*ctx
= iwl_legacy_rxon_ctx_from_vif(vif
);
1411 lockdep_assert_held(&priv
->mutex
);
1413 if (priv
->scan_vif
== vif
) {
1414 iwl_legacy_scan_cancel_timeout(priv
, 200);
1415 iwl_legacy_force_scan_end(priv
);
1419 iwl_legacy_set_mode(priv
, ctx
);
1420 if (!ctx
->always_active
)
1421 ctx
->is_active
= false;
1425 void iwl_legacy_mac_remove_interface(struct ieee80211_hw
*hw
,
1426 struct ieee80211_vif
*vif
)
1428 struct iwl_priv
*priv
= hw
->priv
;
1429 struct iwl_rxon_context
*ctx
= iwl_legacy_rxon_ctx_from_vif(vif
);
1431 IWL_DEBUG_MAC80211(priv
, "enter\n");
1433 mutex_lock(&priv
->mutex
);
1435 WARN_ON(ctx
->vif
!= vif
);
1438 iwl_legacy_teardown_interface(priv
, vif
, false);
1440 memset(priv
->bssid
, 0, ETH_ALEN
);
1441 mutex_unlock(&priv
->mutex
);
1443 IWL_DEBUG_MAC80211(priv
, "leave\n");
1446 EXPORT_SYMBOL(iwl_legacy_mac_remove_interface
);
1448 int iwl_legacy_alloc_txq_mem(struct iwl_priv
*priv
)
1451 priv
->txq
= kzalloc(
1452 sizeof(struct iwl_tx_queue
) *
1453 priv
->cfg
->base_params
->num_of_queues
,
1456 IWL_ERR(priv
, "Not enough memory for txq\n");
1461 EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem
);
1463 void iwl_legacy_txq_mem(struct iwl_priv
*priv
)
1468 EXPORT_SYMBOL(iwl_legacy_txq_mem
);
1470 #ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
1472 #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1474 void iwl_legacy_reset_traffic_log(struct iwl_priv
*priv
)
1476 priv
->tx_traffic_idx
= 0;
1477 priv
->rx_traffic_idx
= 0;
1478 if (priv
->tx_traffic
)
1479 memset(priv
->tx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
1480 if (priv
->rx_traffic
)
1481 memset(priv
->rx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
1484 int iwl_legacy_alloc_traffic_mem(struct iwl_priv
*priv
)
1486 u32 traffic_size
= IWL_TRAFFIC_DUMP_SIZE
;
1488 if (iwlegacy_debug_level
& IWL_DL_TX
) {
1489 if (!priv
->tx_traffic
) {
1491 kzalloc(traffic_size
, GFP_KERNEL
);
1492 if (!priv
->tx_traffic
)
1496 if (iwlegacy_debug_level
& IWL_DL_RX
) {
1497 if (!priv
->rx_traffic
) {
1499 kzalloc(traffic_size
, GFP_KERNEL
);
1500 if (!priv
->rx_traffic
)
1504 iwl_legacy_reset_traffic_log(priv
);
1507 EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem
);
1509 void iwl_legacy_free_traffic_mem(struct iwl_priv
*priv
)
1511 kfree(priv
->tx_traffic
);
1512 priv
->tx_traffic
= NULL
;
1514 kfree(priv
->rx_traffic
);
1515 priv
->rx_traffic
= NULL
;
1517 EXPORT_SYMBOL(iwl_legacy_free_traffic_mem
);
1519 void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv
*priv
,
1520 u16 length
, struct ieee80211_hdr
*header
)
1525 if (likely(!(iwlegacy_debug_level
& IWL_DL_TX
)))
1528 if (!priv
->tx_traffic
)
1531 fc
= header
->frame_control
;
1532 if (ieee80211_is_data(fc
)) {
1533 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
1534 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
1535 memcpy((priv
->tx_traffic
+
1536 (priv
->tx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
1538 priv
->tx_traffic_idx
=
1539 (priv
->tx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
1542 EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame
);
1544 void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv
*priv
,
1545 u16 length
, struct ieee80211_hdr
*header
)
1550 if (likely(!(iwlegacy_debug_level
& IWL_DL_RX
)))
1553 if (!priv
->rx_traffic
)
1556 fc
= header
->frame_control
;
1557 if (ieee80211_is_data(fc
)) {
1558 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
1559 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
1560 memcpy((priv
->rx_traffic
+
1561 (priv
->rx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
1563 priv
->rx_traffic_idx
=
1564 (priv
->rx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
1567 EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame
);
1569 const char *iwl_legacy_get_mgmt_string(int cmd
)
1572 IWL_CMD(MANAGEMENT_ASSOC_REQ
);
1573 IWL_CMD(MANAGEMENT_ASSOC_RESP
);
1574 IWL_CMD(MANAGEMENT_REASSOC_REQ
);
1575 IWL_CMD(MANAGEMENT_REASSOC_RESP
);
1576 IWL_CMD(MANAGEMENT_PROBE_REQ
);
1577 IWL_CMD(MANAGEMENT_PROBE_RESP
);
1578 IWL_CMD(MANAGEMENT_BEACON
);
1579 IWL_CMD(MANAGEMENT_ATIM
);
1580 IWL_CMD(MANAGEMENT_DISASSOC
);
1581 IWL_CMD(MANAGEMENT_AUTH
);
1582 IWL_CMD(MANAGEMENT_DEAUTH
);
1583 IWL_CMD(MANAGEMENT_ACTION
);
1590 const char *iwl_legacy_get_ctrl_string(int cmd
)
1593 IWL_CMD(CONTROL_BACK_REQ
);
1594 IWL_CMD(CONTROL_BACK
);
1595 IWL_CMD(CONTROL_PSPOLL
);
1596 IWL_CMD(CONTROL_RTS
);
1597 IWL_CMD(CONTROL_CTS
);
1598 IWL_CMD(CONTROL_ACK
);
1599 IWL_CMD(CONTROL_CFEND
);
1600 IWL_CMD(CONTROL_CFENDACK
);
1607 void iwl_legacy_clear_traffic_stats(struct iwl_priv
*priv
)
1609 memset(&priv
->tx_stats
, 0, sizeof(struct traffic_stats
));
1610 memset(&priv
->rx_stats
, 0, sizeof(struct traffic_stats
));
1614 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS defined,
1615 * iwl_legacy_update_stats function will
1616 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass
1617 * Use debugFs to display the rx/rx_statistics
1618 * if CONFIG_IWLWIFI_LEGACY_DEBUGFS not being defined, then no MGMT and CTRL
1619 * information will be recorded, but DATA pkt still will be recorded
1620 * for the reason of iwl_led.c need to control the led blinking based on
1621 * number of tx and rx data.
1625 iwl_legacy_update_stats(struct iwl_priv
*priv
, bool is_tx
, __le16 fc
, u16 len
)
1627 struct traffic_stats
*stats
;
1630 stats
= &priv
->tx_stats
;
1632 stats
= &priv
->rx_stats
;
1634 if (ieee80211_is_mgmt(fc
)) {
1635 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
1636 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ
):
1637 stats
->mgmt
[MANAGEMENT_ASSOC_REQ
]++;
1639 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP
):
1640 stats
->mgmt
[MANAGEMENT_ASSOC_RESP
]++;
1642 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ
):
1643 stats
->mgmt
[MANAGEMENT_REASSOC_REQ
]++;
1645 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP
):
1646 stats
->mgmt
[MANAGEMENT_REASSOC_RESP
]++;
1648 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ
):
1649 stats
->mgmt
[MANAGEMENT_PROBE_REQ
]++;
1651 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP
):
1652 stats
->mgmt
[MANAGEMENT_PROBE_RESP
]++;
1654 case cpu_to_le16(IEEE80211_STYPE_BEACON
):
1655 stats
->mgmt
[MANAGEMENT_BEACON
]++;
1657 case cpu_to_le16(IEEE80211_STYPE_ATIM
):
1658 stats
->mgmt
[MANAGEMENT_ATIM
]++;
1660 case cpu_to_le16(IEEE80211_STYPE_DISASSOC
):
1661 stats
->mgmt
[MANAGEMENT_DISASSOC
]++;
1663 case cpu_to_le16(IEEE80211_STYPE_AUTH
):
1664 stats
->mgmt
[MANAGEMENT_AUTH
]++;
1666 case cpu_to_le16(IEEE80211_STYPE_DEAUTH
):
1667 stats
->mgmt
[MANAGEMENT_DEAUTH
]++;
1669 case cpu_to_le16(IEEE80211_STYPE_ACTION
):
1670 stats
->mgmt
[MANAGEMENT_ACTION
]++;
1673 } else if (ieee80211_is_ctl(fc
)) {
1674 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
1675 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ
):
1676 stats
->ctrl
[CONTROL_BACK_REQ
]++;
1678 case cpu_to_le16(IEEE80211_STYPE_BACK
):
1679 stats
->ctrl
[CONTROL_BACK
]++;
1681 case cpu_to_le16(IEEE80211_STYPE_PSPOLL
):
1682 stats
->ctrl
[CONTROL_PSPOLL
]++;
1684 case cpu_to_le16(IEEE80211_STYPE_RTS
):
1685 stats
->ctrl
[CONTROL_RTS
]++;
1687 case cpu_to_le16(IEEE80211_STYPE_CTS
):
1688 stats
->ctrl
[CONTROL_CTS
]++;
1690 case cpu_to_le16(IEEE80211_STYPE_ACK
):
1691 stats
->ctrl
[CONTROL_ACK
]++;
1693 case cpu_to_le16(IEEE80211_STYPE_CFEND
):
1694 stats
->ctrl
[CONTROL_CFEND
]++;
1696 case cpu_to_le16(IEEE80211_STYPE_CFENDACK
):
1697 stats
->ctrl
[CONTROL_CFENDACK
]++;
1703 stats
->data_bytes
+= len
;
1706 EXPORT_SYMBOL(iwl_legacy_update_stats
);
1709 int iwl_legacy_force_reset(struct iwl_priv
*priv
, bool external
)
1711 struct iwl_force_reset
*force_reset
;
1713 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
1716 force_reset
= &priv
->force_reset
;
1717 force_reset
->reset_request_count
++;
1719 if (force_reset
->last_force_reset_jiffies
&&
1720 time_after(force_reset
->last_force_reset_jiffies
+
1721 force_reset
->reset_duration
, jiffies
)) {
1722 IWL_DEBUG_INFO(priv
, "force reset rejected\n");
1723 force_reset
->reset_reject_count
++;
1727 force_reset
->reset_success_count
++;
1728 force_reset
->last_force_reset_jiffies
= jiffies
;
1731 * if the request is from external(ex: debugfs),
1732 * then always perform the request in regardless the module
1734 * if the request is from internal (uCode error or driver
1735 * detect failure), then fw_restart module parameter
1736 * need to be check before performing firmware reload
1739 if (!external
&& !priv
->cfg
->mod_params
->restart_fw
) {
1740 IWL_DEBUG_INFO(priv
, "Cancel firmware reload based on "
1741 "module parameter setting\n");
1745 IWL_ERR(priv
, "On demand firmware reload\n");
1747 /* Set the FW error flag -- cleared on iwl_down */
1748 set_bit(STATUS_FW_ERROR
, &priv
->status
);
1749 wake_up_interruptible(&priv
->wait_command_queue
);
1751 * Keep the restart process from trying to send host
1752 * commands by clearing the INIT status bit
1754 clear_bit(STATUS_READY
, &priv
->status
);
1755 queue_work(priv
->workqueue
, &priv
->restart
);
1761 iwl_legacy_mac_change_interface(struct ieee80211_hw
*hw
,
1762 struct ieee80211_vif
*vif
,
1763 enum nl80211_iftype newtype
, bool newp2p
)
1765 struct iwl_priv
*priv
= hw
->priv
;
1766 struct iwl_rxon_context
*ctx
= iwl_legacy_rxon_ctx_from_vif(vif
);
1767 struct iwl_rxon_context
*tmp
;
1768 u32 interface_modes
;
1771 newtype
= ieee80211_iftype_p2p(newtype
, newp2p
);
1773 mutex_lock(&priv
->mutex
);
1775 if (!ctx
->vif
|| !iwl_legacy_is_ready_rf(priv
)) {
1777 * Huh? But wait ... this can maybe happen when
1778 * we're in the middle of a firmware restart!
1784 interface_modes
= ctx
->interface_modes
| ctx
->exclusive_interface_modes
;
1786 if (!(interface_modes
& BIT(newtype
))) {
1791 if (ctx
->exclusive_interface_modes
& BIT(newtype
)) {
1792 for_each_context(priv
, tmp
) {
1800 * The current mode switch would be exclusive, but
1801 * another context is active ... refuse the switch.
1809 iwl_legacy_teardown_interface(priv
, vif
, true);
1810 vif
->type
= newtype
;
1812 err
= iwl_legacy_setup_interface(priv
, ctx
);
1815 * We've switched internally, but submitting to the
1816 * device may have failed for some reason. Mask this
1817 * error, because otherwise mac80211 will not switch
1818 * (and set the interface type back) and we'll be
1819 * out of sync with it.
1824 mutex_unlock(&priv
->mutex
);
1827 EXPORT_SYMBOL(iwl_legacy_mac_change_interface
);
/*
 * On every watchdog tick we check (latest) time stamp. If it does not
 * change during timeout period and queue is not empty we reset firmware.
 */
static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
{
	struct iwl_tx_queue *txq = &priv->txq[cnt];
	struct iwl_queue *q = &txq->q;
	unsigned long timeout;
	int ret;

	if (q->read_ptr == q->write_ptr) {
		txq->time_stamp = jiffies;
		return 0;
	}

	timeout = txq->time_stamp +
		  msecs_to_jiffies(priv->cfg->base_params->wd_timeout);

	if (time_after(jiffies, timeout)) {
		IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
				q->id, priv->cfg->base_params->wd_timeout);
		ret = iwl_legacy_force_reset(priv, false);
		return (ret == -EAGAIN) ? 0 : 1;
	}

	return 0;
}

/*
 * Making the watchdog tick a quarter of the timeout assures we will
 * discover a hung queue between timeout and 1.25*timeout
 */
#define IWL_WD_TICK(timeout) ((timeout) / 4)

/*
 * Watchdog timer callback, we check each tx queue for stuck, if hung
 * we reset the firmware. If everything is fine just rearm the timer.
 */
void iwl_legacy_bg_watchdog(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;
	int cnt;
	unsigned long timeout;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	timeout = priv->cfg->base_params->wd_timeout;
	if (timeout == 0)
		return;

	/* monitor and check for stuck cmd queue */
	if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
		return;

	/* monitor and check for other stuck queues */
	if (iwl_legacy_is_any_associated(priv)) {
		for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
			/* skip as we already checked the command queue */
			if (cnt == priv->cmd_queue)
				continue;
			if (iwl_legacy_check_stuck_queue(priv, cnt))
				return;
		}
	}

	mod_timer(&priv->watchdog, jiffies +
		  msecs_to_jiffies(IWL_WD_TICK(timeout)));
}
EXPORT_SYMBOL(iwl_legacy_bg_watchdog);

void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
{
	unsigned int timeout = priv->cfg->base_params->wd_timeout;

	if (timeout)
		mod_timer(&priv->watchdog,
			  jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
	else
		del_timer(&priv->watchdog);
}
EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
u32
iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
					u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) &
		(iwl_legacy_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
				   priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
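
/*
 * Worked example (added illustration; the tsf-bits value is device dependent):
 * assuming hw_params.beacon_time_tsf_bits == 22 and a 100 TU beacon interval
 * (100 * 1024 = 102400 usec), usec = 250000 yields quot = 2 beacons and
 * rem = 45200 usec, i.e. a return value of (2 << 22) + 45200.
 */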
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32
iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
				(addon & iwl_legacy_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
#ifdef CONFIG_PM

int iwl_legacy_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);

	/*
	 * This function is called when the system goes into suspend state.
	 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
	 * first, but since iwl_mac_stop() has no knowledge of who the caller is,
	 * it will not call apm_ops.stop() to stop the DMA operation.
	 * Calling apm_ops.stop here makes sure we stop the DMA.
	 */
	iwl_legacy_apm_stop(priv);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_suspend);

int iwl_legacy_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct iwl_priv *priv = pci_get_drvdata(pdev);
	bool hw_rfkill = false;

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	iwl_legacy_enable_interrupts(priv);

	if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
		hw_rfkill = true;

	if (hw_rfkill)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);

	return 0;
}
EXPORT_SYMBOL(iwl_legacy_pci_resume);

const struct dev_pm_ops iwl_legacy_pm_ops = {
	.suspend = iwl_legacy_pci_suspend,
	.resume = iwl_legacy_pci_resume,
	.freeze = iwl_legacy_pci_suspend,
	.thaw = iwl_legacy_pci_resume,
	.poweroff = iwl_legacy_pci_suspend,
	.restore = iwl_legacy_pci_resume,
};
EXPORT_SYMBOL(iwl_legacy_pm_ops);

#endif /* CONFIG_PM */
2031 iwl_legacy_update_qos(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
2033 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
2036 if (!ctx
->is_active
)
2039 ctx
->qos_data
.def_qos_parm
.qos_flags
= 0;
2041 if (ctx
->qos_data
.qos_active
)
2042 ctx
->qos_data
.def_qos_parm
.qos_flags
|=
2043 QOS_PARAM_FLG_UPDATE_EDCA_MSK
;
2045 if (ctx
->ht
.enabled
)
2046 ctx
->qos_data
.def_qos_parm
.qos_flags
|= QOS_PARAM_FLG_TGN_MSK
;
2048 IWL_DEBUG_QOS(priv
, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2049 ctx
->qos_data
.qos_active
,
2050 ctx
->qos_data
.def_qos_parm
.qos_flags
);
2052 iwl_legacy_send_cmd_pdu_async(priv
, ctx
->qos_cmd
,
2053 sizeof(struct iwl_qosparam_cmd
),
2054 &ctx
->qos_data
.def_qos_parm
, NULL
);
/**
 * iwl_legacy_mac_config - mac80211 config callback
 */
int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = conf->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct iwl_rxon_context *ctx;
	unsigned long flags = 0;
	int ret = 0;
	u16 ch;
	int scan_active = 0;
	bool ht_changed[NUM_IWL_RXON_CTX] = {};

	if (WARN_ON(!priv->cfg->ops->legacy))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
					channel->hw_value, changed);

	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
		scan_active = 1;
		IWL_DEBUG_MAC80211(priv, "scan active\n");
	}

	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
		       IEEE80211_CONF_CHANGE_CHANNEL)) {
		/* mac80211 uses static for non-HT which is what we want */
		priv->current_ht_config.smps = conf->smps_mode;

		/*
		 * Recalculate chain counts.
		 *
		 * If monitor mode is enabled then mac80211 will
		 * set up the SM PS mode to OFF if an HT channel is
		 * configured.
		 */
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			for_each_context(priv, ctx)
				priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	/* during scanning mac80211 will delay channel setting until
	 * scan finish with changed = 0
	 */
	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {

		if (scan_active)
			goto set_ch_out;

		ch = channel->hw_value;
		ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
		if (!iwl_legacy_is_channel_valid(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
		    !iwl_legacy_is_channel_ibss(ch_info)) {
			IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
			ret = -EINVAL;
			goto set_ch_out;
		}

		spin_lock_irqsave(&priv->lock, flags);

		for_each_context(priv, ctx) {
			/* Configure HT40 channels */
			if (ctx->ht.enabled != conf_is_ht(conf)) {
				ctx->ht.enabled = conf_is_ht(conf);
				ht_changed[ctx->ctxid] = true;
			}

			if (ctx->ht.enabled) {
				if (conf_is_ht40_minus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_BELOW;
					ctx->ht.is_40mhz = true;
				} else if (conf_is_ht40_plus(conf)) {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
					ctx->ht.is_40mhz = true;
				} else {
					ctx->ht.extension_chan_offset =
					IEEE80211_HT_PARAM_CHA_SEC_NONE;
					ctx->ht.is_40mhz = false;
				}
			} else
				ctx->ht.is_40mhz = false;

			/*
			 * Default to no protection. Protection mode will
			 * later be set from BSS config in iwl_ht_conf
			 */
			ctx->ht.protection =
					IEEE80211_HT_OP_MODE_PROTECTION_NONE;

			/* if we are switching from ht to 2.4 clear flags
			 * from any ht related info since 2.4 does not
			 * support ht */
			if ((le16_to_cpu(ctx->staging.channel) != ch))
				ctx->staging.flags = 0;

			iwl_legacy_set_rxon_channel(priv, channel, ctx);
			iwl_legacy_set_rxon_ht(priv, ht_conf);

			iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
					       ctx->vif);
		}

		spin_unlock_irqrestore(&priv->lock, flags);

		if (priv->cfg->ops->legacy->update_bcast_stations)
			ret =
			priv->cfg->ops->legacy->update_bcast_stations(priv);

 set_ch_out:
		/* The list of supported rates and rate mask can be different
		 * for each band; since the band may have changed, reset
		 * the rate mask to what mac80211 lists */
		iwl_legacy_set_rate(priv);
	}

	if (changed & (IEEE80211_CONF_CHANGE_PS |
			IEEE80211_CONF_CHANGE_IDLE)) {
		ret = iwl_legacy_power_update_mode(priv, false);
		if (ret)
			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
			priv->tx_power_user_lmt, conf->power_level);

		iwl_legacy_set_tx_power(priv, conf->power_level, false);
	}

	if (!iwl_legacy_is_ready(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		goto out;
	}

	if (scan_active)
		goto out;

	for_each_context(priv, ctx) {
		if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
			iwl_legacy_commit_rxon(priv, ctx);
		else
			IWL_DEBUG_INFO(priv,
				"Not re-sending same RXON configuration.\n");
		if (ht_changed[ctx->ctxid])
			iwl_legacy_update_qos(priv, ctx);
	}

out:
	IWL_DEBUG_MAC80211(priv, "leave\n");
	mutex_unlock(&priv->mutex);
	return ret;
}
EXPORT_SYMBOL(iwl_legacy_mac_config);
void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	/* IBSS can only be the IWL_RXON_CTX_BSS context */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	spin_lock_irqsave(&priv->lock, flags);
	memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
	spin_unlock_irqrestore(&priv->lock, flags);

	spin_lock_irqsave(&priv->lock, flags);

	/* new association get rid of ibss beacon skb */
	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = NULL;

	priv->timestamp = 0;

	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_legacy_scan_cancel_timeout(priv, 100);
	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
		mutex_unlock(&priv->mutex);
		return;
	}

	/* we are restarting association process
	 * clear RXON_FILTER_ASSOC_MSK bit
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_legacy_commit_rxon(priv, ctx);

	iwl_legacy_set_rate(priv);

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
static void iwl_legacy_ht_conf(struct iwl_priv *priv,
			struct ieee80211_vif *vif)
{
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	struct ieee80211_sta *sta;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	IWL_DEBUG_ASSOC(priv, "enter:\n");

	if (!ctx->ht.enabled)
		return;

	ctx->ht.protection =
		bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
	ctx->ht.non_gf_sta_present =
		!!(bss_conf->ht_operation_mode &
				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);

	ht_conf->single_chain_sufficient = false;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
			int maxstreams;

			maxstreams = (ht_cap->mcs.tx_params &
			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
				>> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
			maxstreams += 1;

			if ((ht_cap->mcs.rx_mask[1] == 0) &&
			    (ht_cap->mcs.rx_mask[2] == 0))
				ht_conf->single_chain_sufficient = true;
			if (maxstreams <= 1)
				ht_conf->single_chain_sufficient = true;
		} else {
			/*
			 * If at all, this can only happen through a race
			 * when the AP disconnects us while we're still
			 * setting up the connection, in that case mac80211
			 * will soon tell us about that.
			 */
			ht_conf->single_chain_sufficient = true;
		}
		rcu_read_unlock();
		break;
	case NL80211_IFTYPE_ADHOC:
		ht_conf->single_chain_sufficient = true;
		break;
	default:
		break;
	}

	IWL_DEBUG_ASSOC(priv, "leave\n");
}
static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
					   struct ieee80211_vif *vif)
{
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);

	/*
	 * inform the ucode that there is no longer an
	 * association and that no more packets should be
	 * sent
	 */
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	ctx->staging.assoc_id = 0;
	iwl_legacy_commit_rxon(priv, ctx);
}
static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif)
{
	struct iwl_priv *priv = hw->priv;
	unsigned long flags;
	__le64 timestamp;
	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);

	if (!skb)
		return;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "update beacon but no beacon context!\n");
		dev_kfree_skb(skb);
		return;
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (priv->beacon_skb)
		dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = skb;

	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
	priv->timestamp = le64_to_cpu(timestamp);

	IWL_DEBUG_MAC80211(priv, "leave\n");
	spin_unlock_irqrestore(&priv->lock, flags);

	if (!iwl_legacy_is_ready_rf(priv)) {
		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
		return;
	}

	priv->cfg->ops->legacy->post_associate(priv);
}
void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *bss_conf,
				     u32 changes)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
	int ret;

	if (WARN_ON(!priv->cfg->ops->legacy))
		return;

	IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);

	mutex_lock(&priv->mutex);

	if (!iwl_legacy_is_alive(priv)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (changes & BSS_CHANGED_QOS) {
		unsigned long flags;

		spin_lock_irqsave(&priv->lock, flags);
		ctx->qos_data.qos_active = bss_conf->qos;
		iwl_legacy_update_qos(priv, ctx);
		spin_unlock_irqrestore(&priv->lock, flags);
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		/*
		 * the add_interface code must make sure we only ever
		 * have a single interface that could be beaconing at
		 * any time.
		 */
		if (vif->bss_conf.enable_beacon)
			priv->beacon_ctx = ctx;
		else
			priv->beacon_ctx = NULL;
	}

	if (changes & BSS_CHANGED_BSSID) {
		IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);

		/*
		 * If there is currently a HW scan going on in the
		 * background then we need to cancel it else the RXON
		 * below/in post_associate will fail.
		 */
		if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
			IWL_WARN(priv,
				"Aborted scan still in progress after 100ms\n");
			IWL_DEBUG_MAC80211(priv,
				"leaving - scan abort failed.\n");
			mutex_unlock(&priv->mutex);
			return;
		}

		/* mac80211 only sets assoc when in STATION mode */
		if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);

			/* currently needed in a few places */
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
		} else {
			ctx->staging.filter_flags &=
				~RXON_FILTER_ASSOC_MSK;
		}
	}

	/*
	 * This needs to be after setting the BSSID in case
	 * mac80211 decides to do both changes at once because
	 * it will invoke post_associate.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
		iwl_legacy_beacon_update(hw, vif);

	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
		IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
				   bss_conf->use_short_preamble);
		if (bss_conf->use_short_preamble)
			ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	}

	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
		IWL_DEBUG_MAC80211(priv,
			"ERP_CTS %d\n", bss_conf->use_cts_prot);
		if (bss_conf->use_cts_prot &&
			(priv->band != IEEE80211_BAND_5GHZ))
			ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
		else
			ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
		if (bss_conf->use_cts_prot)
			ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
		else
			ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
	}

	if (changes & BSS_CHANGED_BASIC_RATES) {
		/* XXX use this information
		 *
		 * To do that, remove code from iwl_legacy_set_rate() and put something
		 * like this here:
		 *
		if (A-band)
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates;
		else
			ctx->staging.ofdm_basic_rates =
				bss_conf->basic_rates >> 4;
			ctx->staging.cck_basic_rates =
				bss_conf->basic_rates & 0xF;
		 */
	}

	if (changes & BSS_CHANGED_HT) {
		iwl_legacy_ht_conf(priv, vif);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (changes & BSS_CHANGED_ASSOC) {
		IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
		if (bss_conf->assoc) {
			priv->timestamp = bss_conf->timestamp;

			if (!iwl_legacy_is_rfkill(priv))
				priv->cfg->ops->legacy->post_associate(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
		IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
				   changes);
		ret = iwl_legacy_send_rxon_assoc(priv, ctx);
		if (!ret) {
			/* Sync active_rxon with latest change. */
			memcpy((void *)&ctx->active,
				&ctx->staging,
				sizeof(struct iwl_legacy_rxon_cmd));
		}
	}

	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		if (vif->bss_conf.enable_beacon) {
			memcpy(ctx->staging.bssid_addr,
			       bss_conf->bssid, ETH_ALEN);
			memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
			priv->cfg->ops->legacy->config_ap(priv);
		} else
			iwl_legacy_set_no_assoc(priv, vif);
	}

	if (changes & BSS_CHANGED_IBSS) {
		ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
							bss_conf->ibss_joined);
		if (ret)
			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
				bss_conf->ibss_joined ? "add" : "remove",
				bss_conf->bssid);
	}

	mutex_unlock(&priv->mutex);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}
EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
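
/*
 * Illustrative sketch (not part of this file): the 3945/4965 drivers are
 * expected to plug the shared mac80211 callbacks exported above into their
 * struct ieee80211_ops, along the lines of:
 *
 *	static const struct ieee80211_ops iwlXXXX_hw_ops = {
 *		.tx               = iwlXXXX_mac_tx,
 *		.config           = iwl_legacy_mac_config,
 *		.reset_tsf        = iwl_legacy_mac_reset_tsf,
 *		.bss_info_changed = iwl_legacy_mac_bss_info_changed,
 *		...
 *	};
 *
 * The iwlXXXX_* names are placeholders for the chipset-specific handlers.
 */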
irqreturn_t iwl_legacy_isr(int irq, void *data)
{
	struct iwl_priv *priv = data;
	u32 inta, inta_mask;
	u32 inta_fh;
	unsigned long flags;
	if (!priv)
		return IRQ_NONE;

	spin_lock_irqsave(&priv->lock, flags);

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here. */
	inta_mask = iwl_read32(priv, CSR_INT_MASK);  /* just for debug */
	iwl_write32(priv, CSR_INT_MASK, 0x00000000);

	/* Discover which interrupts are active/pending */
	inta = iwl_read32(priv, CSR_INT);
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);

	/* Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC. */
	if (!inta && !inta_fh) {
		IWL_DEBUG_ISR(priv,
			"Ignore interrupt, inta == 0, inta_fh == 0\n");
		goto none;
	}

	if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
		/* Hardware disappeared. It might have already raised
		 * an interrupt */
		IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		goto unplugged;
	}

	IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
		      inta, inta_mask, inta_fh);

	inta &= ~CSR_INT_BIT_SCD;

	/* iwl_irq_tasklet() will service interrupts and re-enable them */
	if (likely(inta || inta_fh))
		tasklet_schedule(&priv->irq_tasklet);

unplugged:
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_HANDLED;

none:
	/* re-enable interrupts here since we don't have anything to service. */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_legacy_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	return IRQ_NONE;
}
EXPORT_SYMBOL(iwl_legacy_isr);
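
/*
 * Illustrative sketch (not part of this file): the interrupt handler above
 * is meant to be registered by the chipset driver as a shared PCI IRQ, for
 * example from its probe routine:
 *
 *	err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
 *			  IRQF_SHARED, DRV_NAME, priv);
 *
 * with a matching free_irq(priv->pci_dev->irq, priv) on teardown.
 */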
/**
 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
 * function.
 */
void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
			       struct ieee80211_tx_info *info,
			       __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		*tx_flags |= TX_CMD_FLG_RTS_MSK;
		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

		if (!ieee80211_is_mgmt(fc))
			return;

		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
			*tx_flags |= TX_CMD_FLG_CTS_MSK;
			break;
		}
	} else if (info->control.rates[0].flags &
		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		*tx_flags |= TX_CMD_FLG_CTS_MSK;
		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
	}
}
EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);