/******************************************************************************
 *
 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <net/mac80211.h>

#include "iwl-eeprom.h"
#include "iwl-dev.h" /* FIXME: remove */
#include "iwl-debug.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-power.h"
#include "iwl-sta.h"
#include "iwl-helpers.h"


MODULE_DESCRIPTION("iwl core");
MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
/*
 * If bt_coex_active is true, uCode will do kill/defer every time the
 * priority line is asserted (BT is sending signals on the priority line
 * in the PCIx).
 * If bt_coex_active is false, uCode will ignore the BT activity and
 * perform normal operation.
 *
 * Users might experience transmit issues on some platforms due to this
 * WiFi/BT co-existence problem. The possible symptoms are:
 *	able to scan and find all available APs, but
 *	not able to associate with any AP.
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
bool bt_coex_active = true;
EXPORT_SYMBOL_GPL(bt_coex_active);
module_param(bt_coex_active, bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
EXPORT_SYMBOL(iwl_debug_level);

const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
EXPORT_SYMBOL(iwl_bcast_addr);
/* This function both allocates and initializes hw and priv. */
struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
	struct iwl_priv *priv;
	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
				cfg->ops->ieee80211_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}
EXPORT_SYMBOL(iwl_alloc_all);
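
/*
 * Usage sketch (hypothetical caller, for illustration only): a front-end
 * driver would allocate the device during probe and pull its private
 * area out of hw->priv, e.g.
 *
 *	hw = iwl_alloc_all(cfg);
 *	if (!hw)
 *		goto out_no_hw;
 *	priv = hw->priv;
 */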
#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
				     struct ieee80211_sta_ht_cap *ht_info,
				     enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = priv->hw_params.rx_chains_num;
	u8 tx_chains_num = priv->hw_params.tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (priv->cfg->ht_params &&
	    priv->cfg->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (priv->hw_params.ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (priv->cfg->mod_params->amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
		ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
	if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
		ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;

	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
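
/*
 * Example of the Rx-highest calculation above (assuming a device with two
 * receive chains and HT40 support on the band): max_bit_rate starts at
 * MAX_BIT_RATE_40_MHZ (150 Mbps, the single-stream short-GI rate on a
 * 40 MHz channel) and is multiplied by rx_chains_num, so mcs.rx_highest
 * advertises 300 Mbps.
 */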
/**
 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
 */
int iwlcore_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	channels = kzalloc(sizeof(struct ieee80211_channel) *
			   priv->channel_count, GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (priv->cfg->sku & IWL_SKU_N)
		iwlcore_init_ht_hw_capab(priv, &sband->ht_cap,
					 IEEE80211_BAND_2GHZ);

	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		if (is_channel_a_band(ch))
			sband = &priv->bands[IEEE80211_BAND_5GHZ];
		else
			sband = &priv->bands[IEEE80211_BAND_2GHZ];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			if (ch->max_power_avg > priv->tx_power_device_lmt)
				priv->tx_power_device_lmt = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
			       ch->channel, geo_ch->center_freq,
			       is_channel_a_band(ch) ? "5.2" : "2.4",
			       geo_ch->flags & IEEE80211_CHAN_DISABLED ?
			       "restricted" : "valid",
			       geo_ch->flags);
	}

	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	    priv->cfg->sku & IWL_SKU_A) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			 "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
			 priv->pci_dev->device,
			 priv->pci_dev->subsystem_device);
		priv->cfg->sku &= ~IWL_SKU_A;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		 priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
EXPORT_SYMBOL(iwlcore_init_geos);
/**
 * iwlcore_free_geos - undo allocations in iwlcore_init_geos
 */
void iwlcore_free_geos(struct iwl_priv *priv)
{
	kfree(priv->ieee_channels);
	kfree(priv->ieee_rates);
	clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
}
EXPORT_SYMBOL(iwlcore_free_geos);
static bool iwl_is_channel_extension(struct iwl_priv *priv,
				     enum ieee80211_band band,
				     u16 channel, u8 extension_chan_offset)
{
	const struct iwl_channel_info *ch_info;

	ch_info = iwl_get_channel_info(priv, band, channel);
	if (!is_channel_valid(ch_info))
		return false;

	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40PLUS);
	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
		return !(ch_info->ht40_extension_channel &
					IEEE80211_CHAN_NO_HT40MINUS);

	return false;
}
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
	 * the bit will not be set in the pure 40 MHz case.
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_is_channel_extension(priv, priv->band,
			le16_to_cpu(ctx->staging.channel),
			ctx->ht.extension_chan_offset);
}
EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
{
	u16 new_val;
	u16 beacon_factor;

	/*
	 * If mac80211 hasn't given us a beacon interval, program
	 * the default into the device (not checking this here
	 * would cause the adjustment below to return the maximum
	 * value, which may break PAN.)
	 */
	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/*
	 * If the beacon interval we obtained from the peer
	 * is too large, we'll have to wake up more often
	 * (and in IBSS case, we'll beacon too much)
	 *
	 * For example, if max_beacon_val is 4096, and the
	 * requested beacon interval is 7000, we'll have to
	 * use 3500 to be able to wake up on the beacons.
	 *
	 * This could badly influence beacon detection stats.
	 */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}
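
/*
 * Worked example of the adjustment above: with max_beacon_val = 4096 and a
 * requested beacon_val of 7000, beacon_factor = (7000 + 4096) / 4096 = 2
 * (integer ceiling division), so the programmed interval becomes
 * 7000 / 2 = 3500 TU, which still divides the peer's beacon period and
 * lets the device wake up in time for every beacon.
 */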
371 int iwl_send_rxon_timing(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
374 s32 interval_tm
, rem
;
375 struct ieee80211_conf
*conf
= NULL
;
377 struct ieee80211_vif
*vif
= ctx
->vif
;
379 conf
= ieee80211_get_hw_conf(priv
->hw
);
381 lockdep_assert_held(&priv
->mutex
);
383 memset(&ctx
->timing
, 0, sizeof(struct iwl_rxon_time_cmd
));
385 ctx
->timing
.timestamp
= cpu_to_le64(priv
->timestamp
);
386 ctx
->timing
.listen_interval
= cpu_to_le16(conf
->listen_interval
);
388 beacon_int
= vif
? vif
->bss_conf
.beacon_int
: 0;
391 * TODO: For IBSS we need to get atim_window from mac80211,
392 * for now just always use 0
394 ctx
->timing
.atim_window
= 0;
396 if (ctx
->ctxid
== IWL_RXON_CTX_PAN
&&
397 (!ctx
->vif
|| ctx
->vif
->type
!= NL80211_IFTYPE_STATION
) &&
398 iwl_is_associated(priv
, IWL_RXON_CTX_BSS
) &&
399 priv
->contexts
[IWL_RXON_CTX_BSS
].vif
&&
400 priv
->contexts
[IWL_RXON_CTX_BSS
].vif
->bss_conf
.beacon_int
) {
401 ctx
->timing
.beacon_interval
=
402 priv
->contexts
[IWL_RXON_CTX_BSS
].timing
.beacon_interval
;
403 beacon_int
= le16_to_cpu(ctx
->timing
.beacon_interval
);
404 } else if (ctx
->ctxid
== IWL_RXON_CTX_BSS
&&
405 iwl_is_associated(priv
, IWL_RXON_CTX_PAN
) &&
406 priv
->contexts
[IWL_RXON_CTX_PAN
].vif
&&
407 priv
->contexts
[IWL_RXON_CTX_PAN
].vif
->bss_conf
.beacon_int
&&
408 (!iwl_is_associated_ctx(ctx
) || !ctx
->vif
||
409 !ctx
->vif
->bss_conf
.beacon_int
)) {
410 ctx
->timing
.beacon_interval
=
411 priv
->contexts
[IWL_RXON_CTX_PAN
].timing
.beacon_interval
;
412 beacon_int
= le16_to_cpu(ctx
->timing
.beacon_interval
);
414 beacon_int
= iwl_adjust_beacon_interval(beacon_int
,
415 priv
->hw_params
.max_beacon_itrvl
* TIME_UNIT
);
416 ctx
->timing
.beacon_interval
= cpu_to_le16(beacon_int
);
419 tsf
= priv
->timestamp
; /* tsf is modifed by do_div: copy it */
420 interval_tm
= beacon_int
* TIME_UNIT
;
421 rem
= do_div(tsf
, interval_tm
);
422 ctx
->timing
.beacon_init_val
= cpu_to_le32(interval_tm
- rem
);
424 ctx
->timing
.dtim_period
= vif
? (vif
->bss_conf
.dtim_period
?: 1) : 1;
426 IWL_DEBUG_ASSOC(priv
,
427 "beacon interval %d beacon timer %d beacon tim %d\n",
428 le16_to_cpu(ctx
->timing
.beacon_interval
),
429 le32_to_cpu(ctx
->timing
.beacon_init_val
),
430 le16_to_cpu(ctx
->timing
.atim_window
));
432 return iwl_send_cmd_pdu(priv
, ctx
->rxon_timing_cmd
,
433 sizeof(ctx
->timing
), &ctx
->timing
);
435 EXPORT_SYMBOL(iwl_send_rxon_timing
);
437 void iwl_set_rxon_hwcrypto(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
,
440 struct iwl_rxon_cmd
*rxon
= &ctx
->staging
;
443 rxon
->filter_flags
&= ~RXON_FILTER_DIS_DECRYPT_MSK
;
445 rxon
->filter_flags
|= RXON_FILTER_DIS_DECRYPT_MSK
;
448 EXPORT_SYMBOL(iwl_set_rxon_hwcrypto
);
450 /* validate RXON structure is valid */
451 int iwl_check_rxon_cmd(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
453 struct iwl_rxon_cmd
*rxon
= &ctx
->staging
;
456 if (rxon
->flags
& RXON_FLG_BAND_24G_MSK
) {
457 if (rxon
->flags
& RXON_FLG_TGJ_NARROW_BAND_MSK
) {
458 IWL_WARN(priv
, "check 2.4G: wrong narrow\n");
461 if (rxon
->flags
& RXON_FLG_RADAR_DETECT_MSK
) {
462 IWL_WARN(priv
, "check 2.4G: wrong radar\n");
466 if (!(rxon
->flags
& RXON_FLG_SHORT_SLOT_MSK
)) {
467 IWL_WARN(priv
, "check 5.2G: not short slot!\n");
470 if (rxon
->flags
& RXON_FLG_CCK_MSK
) {
471 IWL_WARN(priv
, "check 5.2G: CCK!\n");
475 if ((rxon
->node_addr
[0] | rxon
->bssid_addr
[0]) & 0x1) {
476 IWL_WARN(priv
, "mac/bssid mcast!\n");
480 /* make sure basic rates 6Mbps and 1Mbps are supported */
481 if ((rxon
->ofdm_basic_rates
& IWL_RATE_6M_MASK
) == 0 &&
482 (rxon
->cck_basic_rates
& IWL_RATE_1M_MASK
) == 0) {
483 IWL_WARN(priv
, "neither 1 nor 6 are basic\n");
487 if (le16_to_cpu(rxon
->assoc_id
) > 2007) {
488 IWL_WARN(priv
, "aid > 2007\n");
492 if ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
))
493 == (RXON_FLG_CCK_MSK
| RXON_FLG_SHORT_SLOT_MSK
)) {
494 IWL_WARN(priv
, "CCK and short slot\n");
498 if ((rxon
->flags
& (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
))
499 == (RXON_FLG_CCK_MSK
| RXON_FLG_AUTO_DETECT_MSK
)) {
500 IWL_WARN(priv
, "CCK and auto detect");
504 if ((rxon
->flags
& (RXON_FLG_AUTO_DETECT_MSK
|
505 RXON_FLG_TGG_PROTECT_MSK
)) ==
506 RXON_FLG_TGG_PROTECT_MSK
) {
507 IWL_WARN(priv
, "TGg but no auto-detect\n");
512 IWL_WARN(priv
, "Tuning to channel %d\n",
513 le16_to_cpu(rxon
->channel
));
516 IWL_ERR(priv
, "Invalid RXON\n");
521 EXPORT_SYMBOL(iwl_check_rxon_cmd
);
524 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
525 * @priv: staging_rxon is compared to active_rxon
527 * If the RXON structure is changing enough to require a new tune,
528 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
529 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
531 int iwl_full_rxon_required(struct iwl_priv
*priv
,
532 struct iwl_rxon_context
*ctx
)
534 const struct iwl_rxon_cmd
*staging
= &ctx
->staging
;
535 const struct iwl_rxon_cmd
*active
= &ctx
->active
;
539 IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
543 #define CHK_NEQ(c1, c2) \
544 if ((c1) != (c2)) { \
545 IWL_DEBUG_INFO(priv, "need full RXON - " \
546 #c1 " != " #c2 " - %d != %d\n", \
551 /* These items are only settable from the full RXON command */
552 CHK(!iwl_is_associated_ctx(ctx
));
553 CHK(compare_ether_addr(staging
->bssid_addr
, active
->bssid_addr
));
554 CHK(compare_ether_addr(staging
->node_addr
, active
->node_addr
));
555 CHK(compare_ether_addr(staging
->wlap_bssid_addr
,
556 active
->wlap_bssid_addr
));
557 CHK_NEQ(staging
->dev_type
, active
->dev_type
);
558 CHK_NEQ(staging
->channel
, active
->channel
);
559 CHK_NEQ(staging
->air_propagation
, active
->air_propagation
);
560 CHK_NEQ(staging
->ofdm_ht_single_stream_basic_rates
,
561 active
->ofdm_ht_single_stream_basic_rates
);
562 CHK_NEQ(staging
->ofdm_ht_dual_stream_basic_rates
,
563 active
->ofdm_ht_dual_stream_basic_rates
);
564 CHK_NEQ(staging
->ofdm_ht_triple_stream_basic_rates
,
565 active
->ofdm_ht_triple_stream_basic_rates
);
566 CHK_NEQ(staging
->assoc_id
, active
->assoc_id
);
568 /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
569 * be updated with the RXON_ASSOC command -- however only some
570 * flag transitions are allowed using RXON_ASSOC */
572 /* Check if we are not switching bands */
573 CHK_NEQ(staging
->flags
& RXON_FLG_BAND_24G_MSK
,
574 active
->flags
& RXON_FLG_BAND_24G_MSK
);
576 /* Check if we are switching association toggle */
577 CHK_NEQ(staging
->filter_flags
& RXON_FILTER_ASSOC_MSK
,
578 active
->filter_flags
& RXON_FILTER_ASSOC_MSK
);
585 EXPORT_SYMBOL(iwl_full_rxon_required
);
u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx)
{
	/*
	 * Assign the lowest rate -- should really get this from
	 * the beacon skb from mac80211.
	 */
	if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
		return IWL_RATE_1M_PLCP;
	else
		return IWL_RATE_6M_PLCP;
}
EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
601 static void _iwl_set_rxon_ht(struct iwl_priv
*priv
,
602 struct iwl_ht_config
*ht_conf
,
603 struct iwl_rxon_context
*ctx
)
605 struct iwl_rxon_cmd
*rxon
= &ctx
->staging
;
607 if (!ctx
->ht
.enabled
) {
608 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
609 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
|
610 RXON_FLG_HT40_PROT_MSK
|
611 RXON_FLG_HT_PROT_MSK
);
615 /* FIXME: if the definition of ht.protection changed, the "translation"
616 * will be needed for rxon->flags
618 rxon
->flags
|= cpu_to_le32(ctx
->ht
.protection
<< RXON_FLG_HT_OPERATING_MODE_POS
);
620 /* Set up channel bandwidth:
621 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
622 /* clear the HT channel mode before set the mode */
623 rxon
->flags
&= ~(RXON_FLG_CHANNEL_MODE_MSK
|
624 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
625 if (iwl_is_ht40_tx_allowed(priv
, ctx
, NULL
)) {
627 if (ctx
->ht
.protection
== IEEE80211_HT_OP_MODE_PROTECTION_20MHZ
) {
628 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_PURE_40
;
629 /* Note: control channel is opposite of extension channel */
630 switch (ctx
->ht
.extension_chan_offset
) {
631 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
632 rxon
->flags
&= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
634 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
635 rxon
->flags
|= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
639 /* Note: control channel is opposite of extension channel */
640 switch (ctx
->ht
.extension_chan_offset
) {
641 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE
:
642 rxon
->flags
&= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
);
643 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
645 case IEEE80211_HT_PARAM_CHA_SEC_BELOW
:
646 rxon
->flags
|= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK
;
647 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_MIXED
;
649 case IEEE80211_HT_PARAM_CHA_SEC_NONE
:
651 /* channel location only valid if in Mixed mode */
652 IWL_ERR(priv
, "invalid extension channel offset\n");
657 rxon
->flags
|= RXON_FLG_CHANNEL_MODE_LEGACY
;
660 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
661 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
, ctx
);
663 IWL_DEBUG_ASSOC(priv
, "rxon flags 0x%X operation mode :0x%X "
664 "extension channel offset 0x%x\n",
665 le32_to_cpu(rxon
->flags
), ctx
->ht
.protection
,
666 ctx
->ht
.extension_chan_offset
);
669 void iwl_set_rxon_ht(struct iwl_priv
*priv
, struct iwl_ht_config
*ht_conf
)
671 struct iwl_rxon_context
*ctx
;
673 for_each_context(priv
, ctx
)
674 _iwl_set_rxon_ht(priv
, ht_conf
, ctx
);
676 EXPORT_SYMBOL(iwl_set_rxon_ht
);
678 /* Return valid, unused, channel for a passive scan to reset the RF */
679 u8
iwl_get_single_channel_number(struct iwl_priv
*priv
,
680 enum ieee80211_band band
)
682 const struct iwl_channel_info
*ch_info
;
686 struct iwl_rxon_context
*ctx
;
688 if (band
== IEEE80211_BAND_5GHZ
) {
690 max
= priv
->channel_count
;
696 for (i
= min
; i
< max
; i
++) {
699 for_each_context(priv
, ctx
) {
700 busy
= priv
->channel_info
[i
].channel
==
701 le16_to_cpu(ctx
->staging
.channel
);
709 channel
= priv
->channel_info
[i
].channel
;
710 ch_info
= iwl_get_channel_info(priv
, band
, channel
);
711 if (is_channel_valid(ch_info
))
717 EXPORT_SYMBOL(iwl_get_single_channel_number
);
720 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
721 * @ch: requested channel as a pointer to struct ieee80211_channel
723 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
724 * in the staging RXON flag structure based on the ch->band
726 int iwl_set_rxon_channel(struct iwl_priv
*priv
, struct ieee80211_channel
*ch
,
727 struct iwl_rxon_context
*ctx
)
729 enum ieee80211_band band
= ch
->band
;
730 u16 channel
= ch
->hw_value
;
732 if ((le16_to_cpu(ctx
->staging
.channel
) == channel
) &&
733 (priv
->band
== band
))
736 ctx
->staging
.channel
= cpu_to_le16(channel
);
737 if (band
== IEEE80211_BAND_5GHZ
)
738 ctx
->staging
.flags
&= ~RXON_FLG_BAND_24G_MSK
;
740 ctx
->staging
.flags
|= RXON_FLG_BAND_24G_MSK
;
744 IWL_DEBUG_INFO(priv
, "Staging channel set to %d [%d]\n", channel
, band
);
748 EXPORT_SYMBOL(iwl_set_rxon_channel
);
750 void iwl_set_flags_for_band(struct iwl_priv
*priv
,
751 struct iwl_rxon_context
*ctx
,
752 enum ieee80211_band band
,
753 struct ieee80211_vif
*vif
)
755 if (band
== IEEE80211_BAND_5GHZ
) {
756 ctx
->staging
.flags
&=
757 ~(RXON_FLG_BAND_24G_MSK
| RXON_FLG_AUTO_DETECT_MSK
759 ctx
->staging
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
761 /* Copied from iwl_post_associate() */
762 if (vif
&& vif
->bss_conf
.use_short_slot
)
763 ctx
->staging
.flags
|= RXON_FLG_SHORT_SLOT_MSK
;
765 ctx
->staging
.flags
&= ~RXON_FLG_SHORT_SLOT_MSK
;
767 ctx
->staging
.flags
|= RXON_FLG_BAND_24G_MSK
;
768 ctx
->staging
.flags
|= RXON_FLG_AUTO_DETECT_MSK
;
769 ctx
->staging
.flags
&= ~RXON_FLG_CCK_MSK
;
772 EXPORT_SYMBOL(iwl_set_flags_for_band
);
775 * initialize rxon structure with default values from eeprom
777 void iwl_connection_init_rx_config(struct iwl_priv
*priv
,
778 struct iwl_rxon_context
*ctx
)
780 const struct iwl_channel_info
*ch_info
;
782 memset(&ctx
->staging
, 0, sizeof(ctx
->staging
));
785 ctx
->staging
.dev_type
= ctx
->unused_devtype
;
786 } else switch (ctx
->vif
->type
) {
787 case NL80211_IFTYPE_AP
:
788 ctx
->staging
.dev_type
= ctx
->ap_devtype
;
791 case NL80211_IFTYPE_STATION
:
792 ctx
->staging
.dev_type
= ctx
->station_devtype
;
793 ctx
->staging
.filter_flags
= RXON_FILTER_ACCEPT_GRP_MSK
;
796 case NL80211_IFTYPE_ADHOC
:
797 ctx
->staging
.dev_type
= ctx
->ibss_devtype
;
798 ctx
->staging
.flags
= RXON_FLG_SHORT_PREAMBLE_MSK
;
799 ctx
->staging
.filter_flags
= RXON_FILTER_BCON_AWARE_MSK
|
800 RXON_FILTER_ACCEPT_GRP_MSK
;
804 IWL_ERR(priv
, "Unsupported interface type %d\n",
810 /* TODO: Figure out when short_preamble would be set and cache from
812 if (!hw_to_local(priv
->hw
)->short_preamble
)
813 ctx
->staging
.flags
&= ~RXON_FLG_SHORT_PREAMBLE_MSK
;
815 ctx
->staging
.flags
|= RXON_FLG_SHORT_PREAMBLE_MSK
;
818 ch_info
= iwl_get_channel_info(priv
, priv
->band
,
819 le16_to_cpu(ctx
->active
.channel
));
822 ch_info
= &priv
->channel_info
[0];
824 ctx
->staging
.channel
= cpu_to_le16(ch_info
->channel
);
825 priv
->band
= ch_info
->band
;
827 iwl_set_flags_for_band(priv
, ctx
, priv
->band
, ctx
->vif
);
829 ctx
->staging
.ofdm_basic_rates
=
830 (IWL_OFDM_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
831 ctx
->staging
.cck_basic_rates
=
832 (IWL_CCK_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
834 /* clear both MIX and PURE40 mode flag */
835 ctx
->staging
.flags
&= ~(RXON_FLG_CHANNEL_MODE_MIXED
|
836 RXON_FLG_CHANNEL_MODE_PURE_40
);
838 memcpy(ctx
->staging
.node_addr
, ctx
->vif
->addr
, ETH_ALEN
);
840 ctx
->staging
.ofdm_ht_single_stream_basic_rates
= 0xff;
841 ctx
->staging
.ofdm_ht_dual_stream_basic_rates
= 0xff;
842 ctx
->staging
.ofdm_ht_triple_stream_basic_rates
= 0xff;
844 EXPORT_SYMBOL(iwl_connection_init_rx_config
);
846 void iwl_set_rate(struct iwl_priv
*priv
)
848 const struct ieee80211_supported_band
*hw
= NULL
;
849 struct ieee80211_rate
*rate
;
850 struct iwl_rxon_context
*ctx
;
853 hw
= iwl_get_hw_mode(priv
, priv
->band
);
855 IWL_ERR(priv
, "Failed to set rate: unable to get hw mode\n");
859 priv
->active_rate
= 0;
861 for (i
= 0; i
< hw
->n_bitrates
; i
++) {
862 rate
= &(hw
->bitrates
[i
]);
863 if (rate
->hw_value
< IWL_RATE_COUNT_LEGACY
)
864 priv
->active_rate
|= (1 << rate
->hw_value
);
867 IWL_DEBUG_RATE(priv
, "Set active_rate = %0x\n", priv
->active_rate
);
869 for_each_context(priv
, ctx
) {
870 ctx
->staging
.cck_basic_rates
=
871 (IWL_CCK_BASIC_RATES_MASK
>> IWL_FIRST_CCK_RATE
) & 0xF;
873 ctx
->staging
.ofdm_basic_rates
=
874 (IWL_OFDM_BASIC_RATES_MASK
>> IWL_FIRST_OFDM_RATE
) & 0xFF;
877 EXPORT_SYMBOL(iwl_set_rate
);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
{
	/*
	 * See iwl_mac_channel_switch.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->switch_rxon.switch_in_progress) {
		ieee80211_chswitch_done(ctx->vif, is_success);
		mutex_lock(&priv->mutex);
		priv->switch_rxon.switch_in_progress = false;
		mutex_unlock(&priv->mutex);
	}
}
EXPORT_SYMBOL(iwl_chswitch_done);
899 void iwl_rx_csa(struct iwl_priv
*priv
, struct iwl_rx_mem_buffer
*rxb
)
901 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
902 struct iwl_csa_notification
*csa
= &(pkt
->u
.csa_notif
);
905 * See iwl_mac_channel_switch.
907 struct iwl_rxon_context
*ctx
= &priv
->contexts
[IWL_RXON_CTX_BSS
];
908 struct iwl_rxon_cmd
*rxon
= (void *)&ctx
->active
;
910 if (priv
->switch_rxon
.switch_in_progress
) {
911 if (!le32_to_cpu(csa
->status
) &&
912 (csa
->channel
== priv
->switch_rxon
.channel
)) {
913 rxon
->channel
= csa
->channel
;
914 ctx
->staging
.channel
= csa
->channel
;
915 IWL_DEBUG_11H(priv
, "CSA notif: channel %d\n",
916 le16_to_cpu(csa
->channel
));
917 iwl_chswitch_done(priv
, true);
919 IWL_ERR(priv
, "CSA notif (fail) : channel %d\n",
920 le16_to_cpu(csa
->channel
));
921 iwl_chswitch_done(priv
, false);
925 EXPORT_SYMBOL(iwl_rx_csa
);
927 #ifdef CONFIG_IWLWIFI_DEBUG
928 void iwl_print_rx_config_cmd(struct iwl_priv
*priv
,
929 struct iwl_rxon_context
*ctx
)
931 struct iwl_rxon_cmd
*rxon
= &ctx
->staging
;
933 IWL_DEBUG_RADIO(priv
, "RX CONFIG:\n");
934 iwl_print_hex_dump(priv
, IWL_DL_RADIO
, (u8
*) rxon
, sizeof(*rxon
));
935 IWL_DEBUG_RADIO(priv
, "u16 channel: 0x%x\n", le16_to_cpu(rxon
->channel
));
936 IWL_DEBUG_RADIO(priv
, "u32 flags: 0x%08X\n", le32_to_cpu(rxon
->flags
));
937 IWL_DEBUG_RADIO(priv
, "u32 filter_flags: 0x%08x\n",
938 le32_to_cpu(rxon
->filter_flags
));
939 IWL_DEBUG_RADIO(priv
, "u8 dev_type: 0x%x\n", rxon
->dev_type
);
940 IWL_DEBUG_RADIO(priv
, "u8 ofdm_basic_rates: 0x%02x\n",
941 rxon
->ofdm_basic_rates
);
942 IWL_DEBUG_RADIO(priv
, "u8 cck_basic_rates: 0x%02x\n", rxon
->cck_basic_rates
);
943 IWL_DEBUG_RADIO(priv
, "u8[6] node_addr: %pM\n", rxon
->node_addr
);
944 IWL_DEBUG_RADIO(priv
, "u8[6] bssid_addr: %pM\n", rxon
->bssid_addr
);
945 IWL_DEBUG_RADIO(priv
, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon
->assoc_id
));
947 EXPORT_SYMBOL(iwl_print_rx_config_cmd
);
950 * iwl_irq_handle_error - called for HW or SW error interrupt from card
952 void iwl_irq_handle_error(struct iwl_priv
*priv
)
954 /* Set the FW error flag -- cleared on iwl_down */
955 set_bit(STATUS_FW_ERROR
, &priv
->status
);
957 /* Cancel currently queued command. */
958 clear_bit(STATUS_HCMD_ACTIVE
, &priv
->status
);
960 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
961 if (priv
->cfg
->internal_wimax_coex
&&
962 (!(iwl_read_prph(priv
, APMG_CLK_CTRL_REG
) &
963 APMS_CLK_VAL_MRB_FUNC_MODE
) ||
964 (iwl_read_prph(priv
, APMG_PS_CTRL_REG
) &
965 APMG_PS_CTRL_VAL_RESET_REQ
))) {
966 wake_up_interruptible(&priv
->wait_command_queue
);
968 *Keep the restart process from trying to send host
969 * commands by clearing the INIT status bit
971 clear_bit(STATUS_READY
, &priv
->status
);
972 IWL_ERR(priv
, "RF is used by WiMAX\n");
976 IWL_ERR(priv
, "Loaded firmware version: %s\n",
977 priv
->hw
->wiphy
->fw_version
);
979 priv
->cfg
->ops
->lib
->dump_nic_error_log(priv
);
980 if (priv
->cfg
->ops
->lib
->dump_csr
)
981 priv
->cfg
->ops
->lib
->dump_csr(priv
);
982 if (priv
->cfg
->ops
->lib
->dump_fh
)
983 priv
->cfg
->ops
->lib
->dump_fh(priv
, NULL
, false);
984 priv
->cfg
->ops
->lib
->dump_nic_event_log(priv
, false, NULL
, false);
985 #ifdef CONFIG_IWLWIFI_DEBUG
986 if (iwl_get_debug_level(priv
) & IWL_DL_FW_ERRORS
)
987 iwl_print_rx_config_cmd(priv
,
988 &priv
->contexts
[IWL_RXON_CTX_BSS
]);
991 wake_up_interruptible(&priv
->wait_command_queue
);
993 /* Keep the restart process from trying to send host
994 * commands by clearing the INIT status bit */
995 clear_bit(STATUS_READY
, &priv
->status
);
997 if (!test_bit(STATUS_EXIT_PENDING
, &priv
->status
)) {
998 IWL_DEBUG(priv
, IWL_DL_FW_ERRORS
,
999 "Restarting adapter due to uCode error.\n");
1001 if (priv
->cfg
->mod_params
->restart_fw
)
1002 queue_work(priv
->workqueue
, &priv
->restart
);
1005 EXPORT_SYMBOL(iwl_irq_handle_error
);
static int iwl_apm_stop_master(struct iwl_priv *priv)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(priv, "stop master\n");

	return ret;
}

void iwl_apm_stop(struct iwl_priv *priv)
{
	IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");

	/* Stop device's DMA activity */
	iwl_apm_stop_master(priv);

	/* Reset the entire device */
	iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
EXPORT_SYMBOL(iwl_apm_stop);
1046 * Start up NIC's basic functionality after it has been reset
1047 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1048 * NOTE: This does not load uCode nor start the embedded processor
1050 int iwl_apm_init(struct iwl_priv
*priv
)
1055 IWL_DEBUG_INFO(priv
, "Init card's basic functions\n");
1058 * Use "set_bit" below rather than "write", to preserve any hardware
1059 * bits already set by default after reset.
1062 /* Disable L0S exit timer (platform NMI Work/Around) */
1063 iwl_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1064 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER
);
1067 * Disable L0s without affecting L1;
1068 * don't wait for ICH L0s (ICH bug W/A)
1070 iwl_set_bit(priv
, CSR_GIO_CHICKEN_BITS
,
1071 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX
);
1073 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1074 iwl_set_bit(priv
, CSR_DBG_HPET_MEM_REG
, CSR_DBG_HPET_MEM_REG_VAL
);
1077 * Enable HAP INTA (interrupt from management bus) to
1078 * wake device's PCI Express link L1a -> L0s
1079 * NOTE: This is no-op for 3945 (non-existant bit)
1081 iwl_set_bit(priv
, CSR_HW_IF_CONFIG_REG
,
1082 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A
);
1085 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1086 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1087 * If so (likely), disable L0S, so device moves directly L0->L1;
1088 * costs negligible amount of power savings.
1089 * If not (unlikely), enable L0S, so there is at least some
1090 * power savings, even without L1.
1092 if (priv
->cfg
->base_params
->set_l0s
) {
1093 lctl
= iwl_pcie_link_ctl(priv
);
1094 if ((lctl
& PCI_CFG_LINK_CTRL_VAL_L1_EN
) ==
1095 PCI_CFG_LINK_CTRL_VAL_L1_EN
) {
1096 /* L1-ASPM enabled; disable(!) L0S */
1097 iwl_set_bit(priv
, CSR_GIO_REG
,
1098 CSR_GIO_REG_VAL_L0S_ENABLED
);
1099 IWL_DEBUG_POWER(priv
, "L1 Enabled; Disabling L0S\n");
1101 /* L1-ASPM disabled; enable(!) L0S */
1102 iwl_clear_bit(priv
, CSR_GIO_REG
,
1103 CSR_GIO_REG_VAL_L0S_ENABLED
);
1104 IWL_DEBUG_POWER(priv
, "L1 Disabled; Enabling L0S\n");
1108 /* Configure analog phase-lock-loop before activating to D0A */
1109 if (priv
->cfg
->base_params
->pll_cfg_val
)
1110 iwl_set_bit(priv
, CSR_ANA_PLL_CFG
,
1111 priv
->cfg
->base_params
->pll_cfg_val
);
1114 * Set "initialization complete" bit to move adapter from
1115 * D0U* --> D0A* (powered-up active) state.
1117 iwl_set_bit(priv
, CSR_GP_CNTRL
, CSR_GP_CNTRL_REG_FLAG_INIT_DONE
);
1120 * Wait for clock stabilization; once stabilized, access to
1121 * device-internal resources is supported, e.g. iwl_write_prph()
1122 * and accesses to uCode SRAM.
1124 ret
= iwl_poll_bit(priv
, CSR_GP_CNTRL
,
1125 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
,
1126 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
, 25000);
1128 IWL_DEBUG_INFO(priv
, "Failed to init the card\n");
1133 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1134 * BSM (Boostrap State Machine) is only in 3945 and 4965;
1135 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1136 * and don't need BSM to restore data after power-saving sleep.
1138 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1139 * do not disable clocks. This preserves any hardware bits already
1140 * set by default in "CLK_CTRL_REG" after reset.
1142 if (priv
->cfg
->base_params
->use_bsm
)
1143 iwl_write_prph(priv
, APMG_CLK_EN_REG
,
1144 APMG_CLK_VAL_DMA_CLK_RQT
| APMG_CLK_VAL_BSM_CLK_RQT
);
1146 iwl_write_prph(priv
, APMG_CLK_EN_REG
,
1147 APMG_CLK_VAL_DMA_CLK_RQT
);
1150 /* Disable L1-Active */
1151 iwl_set_bits_prph(priv
, APMG_PCIDEV_STT_REG
,
1152 APMG_PCIDEV_STT_VAL_L1_ACT_DIS
);
1157 EXPORT_SYMBOL(iwl_apm_init
);
1160 int iwl_set_tx_power(struct iwl_priv
*priv
, s8 tx_power
, bool force
)
1165 lockdep_assert_held(&priv
->mutex
);
1167 if (priv
->tx_power_user_lmt
== tx_power
&& !force
)
1170 if (!priv
->cfg
->ops
->lib
->send_tx_power
)
1173 if (tx_power
< IWLAGN_TX_POWER_TARGET_POWER_MIN
) {
1175 "Requested user TXPOWER %d below lower limit %d.\n",
1177 IWLAGN_TX_POWER_TARGET_POWER_MIN
);
1181 if (tx_power
> priv
->tx_power_device_lmt
) {
1183 "Requested user TXPOWER %d above upper limit %d.\n",
1184 tx_power
, priv
->tx_power_device_lmt
);
1188 if (!iwl_is_ready_rf(priv
))
1191 /* scan complete use tx_power_next, need to be updated */
1192 priv
->tx_power_next
= tx_power
;
1193 if (test_bit(STATUS_SCANNING
, &priv
->status
) && !force
) {
1194 IWL_DEBUG_INFO(priv
, "Deferring tx power set while scanning\n");
1198 prev_tx_power
= priv
->tx_power_user_lmt
;
1199 priv
->tx_power_user_lmt
= tx_power
;
1201 ret
= priv
->cfg
->ops
->lib
->send_tx_power(priv
);
1203 /* if fail to set tx_power, restore the orig. tx power */
1205 priv
->tx_power_user_lmt
= prev_tx_power
;
1206 priv
->tx_power_next
= prev_tx_power
;
1210 EXPORT_SYMBOL(iwl_set_tx_power
);
void iwl_send_bt_config(struct iwl_priv *priv)
{
	struct iwl_bt_cmd bt_cmd = {
		.lead_time = BT_LEAD_TIME_DEF,
		.max_kill = BT_MAX_KILL_DEF,
	};

	if (!bt_coex_active)
		bt_cmd.flags = BT_COEX_DISABLE;
	else
		bt_cmd.flags = BT_COEX_ENABLE;

	priv->bt_enable_flag = bt_cmd.flags;
	IWL_DEBUG_INFO(priv, "BT coex %s\n",
		       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");

	if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			     sizeof(struct iwl_bt_cmd), &bt_cmd))
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}
EXPORT_SYMBOL(iwl_send_bt_config);
1236 int iwl_send_statistics_request(struct iwl_priv
*priv
, u8 flags
, bool clear
)
1238 struct iwl_statistics_cmd statistics_cmd
= {
1239 .configuration_flags
=
1240 clear
? IWL_STATS_CONF_CLEAR_STATS
: 0,
1243 if (flags
& CMD_ASYNC
)
1244 return iwl_send_cmd_pdu_async(priv
, REPLY_STATISTICS_CMD
,
1245 sizeof(struct iwl_statistics_cmd
),
1246 &statistics_cmd
, NULL
);
1248 return iwl_send_cmd_pdu(priv
, REPLY_STATISTICS_CMD
,
1249 sizeof(struct iwl_statistics_cmd
),
1252 EXPORT_SYMBOL(iwl_send_statistics_request
);
void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
			   struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
#endif
}
EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);

void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
				      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;

	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
			"notification for %s:\n", len,
			get_cmd_string(pkt->hdr.cmd));
	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
}
EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);

void iwl_rx_reply_error(struct iwl_priv *priv,
			struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
		"seq 0x%04X ser 0x%08X\n",
		le32_to_cpu(pkt->u.err_resp.error_type),
		get_cmd_string(pkt->u.err_resp.cmd_id),
		pkt->u.err_resp.cmd_id,
		le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
		le32_to_cpu(pkt->u.err_resp.error_info));
}
EXPORT_SYMBOL(iwl_rx_reply_error);

void iwl_clear_isr_stats(struct iwl_priv *priv)
{
	memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
}
1298 int iwl_mac_conf_tx(struct ieee80211_hw
*hw
, u16 queue
,
1299 const struct ieee80211_tx_queue_params
*params
)
1301 struct iwl_priv
*priv
= hw
->priv
;
1302 struct iwl_rxon_context
*ctx
;
1303 unsigned long flags
;
1306 IWL_DEBUG_MAC80211(priv
, "enter\n");
1308 if (!iwl_is_ready_rf(priv
)) {
1309 IWL_DEBUG_MAC80211(priv
, "leave - RF not ready\n");
1313 if (queue
>= AC_NUM
) {
1314 IWL_DEBUG_MAC80211(priv
, "leave - queue >= AC_NUM %d\n", queue
);
1318 q
= AC_NUM
- 1 - queue
;
1320 spin_lock_irqsave(&priv
->lock
, flags
);
1324 * This may need to be done per interface in nl80211/cfg80211/mac80211.
1326 for_each_context(priv
, ctx
) {
1327 ctx
->qos_data
.def_qos_parm
.ac
[q
].cw_min
=
1328 cpu_to_le16(params
->cw_min
);
1329 ctx
->qos_data
.def_qos_parm
.ac
[q
].cw_max
=
1330 cpu_to_le16(params
->cw_max
);
1331 ctx
->qos_data
.def_qos_parm
.ac
[q
].aifsn
= params
->aifs
;
1332 ctx
->qos_data
.def_qos_parm
.ac
[q
].edca_txop
=
1333 cpu_to_le16((params
->txop
* 32));
1335 ctx
->qos_data
.def_qos_parm
.ac
[q
].reserved1
= 0;
1338 spin_unlock_irqrestore(&priv
->lock
, flags
);
1340 IWL_DEBUG_MAC80211(priv
, "leave\n");
1343 EXPORT_SYMBOL(iwl_mac_conf_tx
);
1345 int iwl_mac_tx_last_beacon(struct ieee80211_hw
*hw
)
1347 struct iwl_priv
*priv
= hw
->priv
;
1349 return priv
->ibss_manager
== IWL_IBSS_MANAGER
;
1351 EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon
);
1353 static int iwl_set_mode(struct iwl_priv
*priv
, struct iwl_rxon_context
*ctx
)
1355 iwl_connection_init_rx_config(priv
, ctx
);
1357 if (priv
->cfg
->ops
->hcmd
->set_rxon_chain
)
1358 priv
->cfg
->ops
->hcmd
->set_rxon_chain(priv
, ctx
);
1360 return iwlcore_commit_rxon(priv
, ctx
);
1363 static int iwl_setup_interface(struct iwl_priv
*priv
,
1364 struct iwl_rxon_context
*ctx
)
1366 struct ieee80211_vif
*vif
= ctx
->vif
;
1369 lockdep_assert_held(&priv
->mutex
);
1372 * This variable will be correct only when there's just
1373 * a single context, but all code using it is for hardware
1374 * that supports only one context.
1376 priv
->iw_mode
= vif
->type
;
1378 ctx
->is_active
= true;
1380 err
= iwl_set_mode(priv
, ctx
);
1382 if (!ctx
->always_active
)
1383 ctx
->is_active
= false;
1387 if (priv
->cfg
->bt_params
&& priv
->cfg
->bt_params
->advanced_bt_coexist
&&
1388 vif
->type
== NL80211_IFTYPE_ADHOC
) {
1390 * pretend to have high BT traffic as long as we
1391 * are operating in IBSS mode, as this will cause
1392 * the rate scaling etc. to behave as intended.
1394 priv
->bt_traffic_load
= IWL_BT_COEX_TRAFFIC_LOAD_HIGH
;
1400 int iwl_mac_add_interface(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
)
1402 struct iwl_priv
*priv
= hw
->priv
;
1403 struct iwl_vif_priv
*vif_priv
= (void *)vif
->drv_priv
;
1404 struct iwl_rxon_context
*tmp
, *ctx
= NULL
;
1407 IWL_DEBUG_MAC80211(priv
, "enter: type %d, addr %pM\n",
1408 vif
->type
, vif
->addr
);
1410 mutex_lock(&priv
->mutex
);
1412 if (!iwl_is_ready_rf(priv
)) {
1413 IWL_WARN(priv
, "Try to add interface when device not ready\n");
1418 for_each_context(priv
, tmp
) {
1419 u32 possible_modes
=
1420 tmp
->interface_modes
| tmp
->exclusive_interface_modes
;
1423 /* check if this busy context is exclusive */
1424 if (tmp
->exclusive_interface_modes
&
1425 BIT(tmp
->vif
->type
)) {
1432 if (!(possible_modes
& BIT(vif
->type
)))
1435 /* have maybe usable context w/o interface */
1445 vif_priv
->ctx
= ctx
;
1448 err
= iwl_setup_interface(priv
, ctx
);
1453 priv
->iw_mode
= NL80211_IFTYPE_STATION
;
1455 mutex_unlock(&priv
->mutex
);
1457 IWL_DEBUG_MAC80211(priv
, "leave\n");
1460 EXPORT_SYMBOL(iwl_mac_add_interface
);
1462 static void iwl_teardown_interface(struct iwl_priv
*priv
,
1463 struct ieee80211_vif
*vif
,
1466 struct iwl_rxon_context
*ctx
= iwl_rxon_ctx_from_vif(vif
);
1468 lockdep_assert_held(&priv
->mutex
);
1470 if (priv
->scan_vif
== vif
) {
1471 iwl_scan_cancel_timeout(priv
, 200);
1472 iwl_force_scan_end(priv
);
1476 iwl_set_mode(priv
, ctx
);
1477 if (!ctx
->always_active
)
1478 ctx
->is_active
= false;
1482 * When removing the IBSS interface, overwrite the
1483 * BT traffic load with the stored one from the last
1484 * notification, if any. If this is a device that
1485 * doesn't implement this, this has no effect since
1486 * both values are the same and zero.
1488 if (vif
->type
== NL80211_IFTYPE_ADHOC
)
1489 priv
->bt_traffic_load
= priv
->last_bt_traffic_load
;
1492 void iwl_mac_remove_interface(struct ieee80211_hw
*hw
,
1493 struct ieee80211_vif
*vif
)
1495 struct iwl_priv
*priv
= hw
->priv
;
1496 struct iwl_rxon_context
*ctx
= iwl_rxon_ctx_from_vif(vif
);
1498 IWL_DEBUG_MAC80211(priv
, "enter\n");
1500 mutex_lock(&priv
->mutex
);
1502 WARN_ON(ctx
->vif
!= vif
);
1505 iwl_teardown_interface(priv
, vif
, false);
1507 memset(priv
->bssid
, 0, ETH_ALEN
);
1508 mutex_unlock(&priv
->mutex
);
1510 IWL_DEBUG_MAC80211(priv
, "leave\n");
1513 EXPORT_SYMBOL(iwl_mac_remove_interface
);
1515 int iwl_alloc_txq_mem(struct iwl_priv
*priv
)
1518 priv
->txq
= kzalloc(
1519 sizeof(struct iwl_tx_queue
) *
1520 priv
->cfg
->base_params
->num_of_queues
,
1523 IWL_ERR(priv
, "Not enough memory for txq\n");
1528 EXPORT_SYMBOL(iwl_alloc_txq_mem
);
1530 void iwl_free_txq_mem(struct iwl_priv
*priv
)
1535 EXPORT_SYMBOL(iwl_free_txq_mem
);
1537 #ifdef CONFIG_IWLWIFI_DEBUGFS
1539 #define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
1541 void iwl_reset_traffic_log(struct iwl_priv
*priv
)
1543 priv
->tx_traffic_idx
= 0;
1544 priv
->rx_traffic_idx
= 0;
1545 if (priv
->tx_traffic
)
1546 memset(priv
->tx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
1547 if (priv
->rx_traffic
)
1548 memset(priv
->rx_traffic
, 0, IWL_TRAFFIC_DUMP_SIZE
);
1551 int iwl_alloc_traffic_mem(struct iwl_priv
*priv
)
1553 u32 traffic_size
= IWL_TRAFFIC_DUMP_SIZE
;
1555 if (iwl_debug_level
& IWL_DL_TX
) {
1556 if (!priv
->tx_traffic
) {
1558 kzalloc(traffic_size
, GFP_KERNEL
);
1559 if (!priv
->tx_traffic
)
1563 if (iwl_debug_level
& IWL_DL_RX
) {
1564 if (!priv
->rx_traffic
) {
1566 kzalloc(traffic_size
, GFP_KERNEL
);
1567 if (!priv
->rx_traffic
)
1571 iwl_reset_traffic_log(priv
);
1574 EXPORT_SYMBOL(iwl_alloc_traffic_mem
);
1576 void iwl_free_traffic_mem(struct iwl_priv
*priv
)
1578 kfree(priv
->tx_traffic
);
1579 priv
->tx_traffic
= NULL
;
1581 kfree(priv
->rx_traffic
);
1582 priv
->rx_traffic
= NULL
;
1584 EXPORT_SYMBOL(iwl_free_traffic_mem
);
1586 void iwl_dbg_log_tx_data_frame(struct iwl_priv
*priv
,
1587 u16 length
, struct ieee80211_hdr
*header
)
1592 if (likely(!(iwl_debug_level
& IWL_DL_TX
)))
1595 if (!priv
->tx_traffic
)
1598 fc
= header
->frame_control
;
1599 if (ieee80211_is_data(fc
)) {
1600 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
1601 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
1602 memcpy((priv
->tx_traffic
+
1603 (priv
->tx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
1605 priv
->tx_traffic_idx
=
1606 (priv
->tx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
1609 EXPORT_SYMBOL(iwl_dbg_log_tx_data_frame
);
1611 void iwl_dbg_log_rx_data_frame(struct iwl_priv
*priv
,
1612 u16 length
, struct ieee80211_hdr
*header
)
1617 if (likely(!(iwl_debug_level
& IWL_DL_RX
)))
1620 if (!priv
->rx_traffic
)
1623 fc
= header
->frame_control
;
1624 if (ieee80211_is_data(fc
)) {
1625 len
= (length
> IWL_TRAFFIC_ENTRY_SIZE
)
1626 ? IWL_TRAFFIC_ENTRY_SIZE
: length
;
1627 memcpy((priv
->rx_traffic
+
1628 (priv
->rx_traffic_idx
* IWL_TRAFFIC_ENTRY_SIZE
)),
1630 priv
->rx_traffic_idx
=
1631 (priv
->rx_traffic_idx
+ 1) % IWL_TRAFFIC_ENTRIES
;
1634 EXPORT_SYMBOL(iwl_dbg_log_rx_data_frame
);
1636 const char *get_mgmt_string(int cmd
)
1639 IWL_CMD(MANAGEMENT_ASSOC_REQ
);
1640 IWL_CMD(MANAGEMENT_ASSOC_RESP
);
1641 IWL_CMD(MANAGEMENT_REASSOC_REQ
);
1642 IWL_CMD(MANAGEMENT_REASSOC_RESP
);
1643 IWL_CMD(MANAGEMENT_PROBE_REQ
);
1644 IWL_CMD(MANAGEMENT_PROBE_RESP
);
1645 IWL_CMD(MANAGEMENT_BEACON
);
1646 IWL_CMD(MANAGEMENT_ATIM
);
1647 IWL_CMD(MANAGEMENT_DISASSOC
);
1648 IWL_CMD(MANAGEMENT_AUTH
);
1649 IWL_CMD(MANAGEMENT_DEAUTH
);
1650 IWL_CMD(MANAGEMENT_ACTION
);
1657 const char *get_ctrl_string(int cmd
)
1660 IWL_CMD(CONTROL_BACK_REQ
);
1661 IWL_CMD(CONTROL_BACK
);
1662 IWL_CMD(CONTROL_PSPOLL
);
1663 IWL_CMD(CONTROL_RTS
);
1664 IWL_CMD(CONTROL_CTS
);
1665 IWL_CMD(CONTROL_ACK
);
1666 IWL_CMD(CONTROL_CFEND
);
1667 IWL_CMD(CONTROL_CFENDACK
);
1674 void iwl_clear_traffic_stats(struct iwl_priv
*priv
)
1676 memset(&priv
->tx_stats
, 0, sizeof(struct traffic_stats
));
1677 memset(&priv
->rx_stats
, 0, sizeof(struct traffic_stats
));
1682 * if CONFIG_IWLWIFI_DEBUGFS defined, iwl_update_stats function will
1683 * record all the MGMT, CTRL and DATA pkt for both TX and Rx pass.
1684 * Use debugFs to display the rx/rx_statistics
1685 * if CONFIG_IWLWIFI_DEBUGFS not being defined, then no MGMT and CTRL
1686 * information will be recorded, but DATA pkt still will be recorded
1687 * for the reason of iwl_led.c need to control the led blinking based on
1688 * number of tx and rx data.
1691 void iwl_update_stats(struct iwl_priv
*priv
, bool is_tx
, __le16 fc
, u16 len
)
1693 struct traffic_stats
*stats
;
1696 stats
= &priv
->tx_stats
;
1698 stats
= &priv
->rx_stats
;
1700 if (ieee80211_is_mgmt(fc
)) {
1701 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
1702 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ
):
1703 stats
->mgmt
[MANAGEMENT_ASSOC_REQ
]++;
1705 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP
):
1706 stats
->mgmt
[MANAGEMENT_ASSOC_RESP
]++;
1708 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ
):
1709 stats
->mgmt
[MANAGEMENT_REASSOC_REQ
]++;
1711 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP
):
1712 stats
->mgmt
[MANAGEMENT_REASSOC_RESP
]++;
1714 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ
):
1715 stats
->mgmt
[MANAGEMENT_PROBE_REQ
]++;
1717 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP
):
1718 stats
->mgmt
[MANAGEMENT_PROBE_RESP
]++;
1720 case cpu_to_le16(IEEE80211_STYPE_BEACON
):
1721 stats
->mgmt
[MANAGEMENT_BEACON
]++;
1723 case cpu_to_le16(IEEE80211_STYPE_ATIM
):
1724 stats
->mgmt
[MANAGEMENT_ATIM
]++;
1726 case cpu_to_le16(IEEE80211_STYPE_DISASSOC
):
1727 stats
->mgmt
[MANAGEMENT_DISASSOC
]++;
1729 case cpu_to_le16(IEEE80211_STYPE_AUTH
):
1730 stats
->mgmt
[MANAGEMENT_AUTH
]++;
1732 case cpu_to_le16(IEEE80211_STYPE_DEAUTH
):
1733 stats
->mgmt
[MANAGEMENT_DEAUTH
]++;
1735 case cpu_to_le16(IEEE80211_STYPE_ACTION
):
1736 stats
->mgmt
[MANAGEMENT_ACTION
]++;
1739 } else if (ieee80211_is_ctl(fc
)) {
1740 switch (fc
& cpu_to_le16(IEEE80211_FCTL_STYPE
)) {
1741 case cpu_to_le16(IEEE80211_STYPE_BACK_REQ
):
1742 stats
->ctrl
[CONTROL_BACK_REQ
]++;
1744 case cpu_to_le16(IEEE80211_STYPE_BACK
):
1745 stats
->ctrl
[CONTROL_BACK
]++;
1747 case cpu_to_le16(IEEE80211_STYPE_PSPOLL
):
1748 stats
->ctrl
[CONTROL_PSPOLL
]++;
1750 case cpu_to_le16(IEEE80211_STYPE_RTS
):
1751 stats
->ctrl
[CONTROL_RTS
]++;
1753 case cpu_to_le16(IEEE80211_STYPE_CTS
):
1754 stats
->ctrl
[CONTROL_CTS
]++;
1756 case cpu_to_le16(IEEE80211_STYPE_ACK
):
1757 stats
->ctrl
[CONTROL_ACK
]++;
1759 case cpu_to_le16(IEEE80211_STYPE_CFEND
):
1760 stats
->ctrl
[CONTROL_CFEND
]++;
1762 case cpu_to_le16(IEEE80211_STYPE_CFENDACK
):
1763 stats
->ctrl
[CONTROL_CFENDACK
]++;
1769 stats
->data_bytes
+= len
;
1771 iwl_leds_background(priv
);
1773 EXPORT_SYMBOL(iwl_update_stats
);
1776 static void iwl_force_rf_reset(struct iwl_priv
*priv
)
1778 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
1781 if (!iwl_is_any_associated(priv
)) {
1782 IWL_DEBUG_SCAN(priv
, "force reset rejected: not associated\n");
1786 * There is no easy and better way to force reset the radio,
1787 * the only known method is switching channel which will force to
1788 * reset and tune the radio.
1789 * Use internal short scan (single channel) operation to should
1790 * achieve this objective.
1791 * Driver should reset the radio when number of consecutive missed
1792 * beacon, or any other uCode error condition detected.
1794 IWL_DEBUG_INFO(priv
, "perform radio reset.\n");
1795 iwl_internal_short_hw_scan(priv
);
1799 int iwl_force_reset(struct iwl_priv
*priv
, int mode
, bool external
)
1801 struct iwl_force_reset
*force_reset
;
1803 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
1806 if (mode
>= IWL_MAX_FORCE_RESET
) {
1807 IWL_DEBUG_INFO(priv
, "invalid reset request.\n");
1810 force_reset
= &priv
->force_reset
[mode
];
1811 force_reset
->reset_request_count
++;
1813 if (force_reset
->last_force_reset_jiffies
&&
1814 time_after(force_reset
->last_force_reset_jiffies
+
1815 force_reset
->reset_duration
, jiffies
)) {
1816 IWL_DEBUG_INFO(priv
, "force reset rejected\n");
1817 force_reset
->reset_reject_count
++;
1821 force_reset
->reset_success_count
++;
1822 force_reset
->last_force_reset_jiffies
= jiffies
;
1823 IWL_DEBUG_INFO(priv
, "perform force reset (%d)\n", mode
);
1826 iwl_force_rf_reset(priv
);
1830 * if the request is from external(ex: debugfs),
1831 * then always perform the request in regardless the module
1833 * if the request is from internal (uCode error or driver
1834 * detect failure), then fw_restart module parameter
1835 * need to be check before performing firmware reload
1837 if (!external
&& !priv
->cfg
->mod_params
->restart_fw
) {
1838 IWL_DEBUG_INFO(priv
, "Cancel firmware reload based on "
1839 "module parameter setting\n");
1842 IWL_ERR(priv
, "On demand firmware reload\n");
1843 /* Set the FW error flag -- cleared on iwl_down */
1844 set_bit(STATUS_FW_ERROR
, &priv
->status
);
1845 wake_up_interruptible(&priv
->wait_command_queue
);
1847 * Keep the restart process from trying to send host
1848 * commands by clearing the INIT status bit
1850 clear_bit(STATUS_READY
, &priv
->status
);
1851 queue_work(priv
->workqueue
, &priv
->restart
);
1857 int iwl_mac_change_interface(struct ieee80211_hw
*hw
, struct ieee80211_vif
*vif
,
1858 enum nl80211_iftype newtype
, bool newp2p
)
1860 struct iwl_priv
*priv
= hw
->priv
;
1861 struct iwl_rxon_context
*ctx
= iwl_rxon_ctx_from_vif(vif
);
1862 struct iwl_rxon_context
*tmp
;
1863 u32 interface_modes
;
1866 newtype
= ieee80211_iftype_p2p(newtype
, newp2p
);
1868 mutex_lock(&priv
->mutex
);
1870 interface_modes
= ctx
->interface_modes
| ctx
->exclusive_interface_modes
;
1872 if (!(interface_modes
& BIT(newtype
))) {
1877 if (ctx
->exclusive_interface_modes
& BIT(newtype
)) {
1878 for_each_context(priv
, tmp
) {
1886 * The current mode switch would be exclusive, but
1887 * another context is active ... refuse the switch.
1895 iwl_teardown_interface(priv
, vif
, true);
1896 vif
->type
= newtype
;
1897 err
= iwl_setup_interface(priv
, ctx
);
1900 * We've switched internally, but submitting to the
1901 * device may have failed for some reason. Mask this
1902 * error, because otherwise mac80211 will not switch
1903 * (and set the interface type back) and we'll be
1904 * out of sync with it.
1909 mutex_unlock(&priv
->mutex
);
1912 EXPORT_SYMBOL(iwl_mac_change_interface
);
1915 * On every watchdog tick we check (latest) time stamp. If it does not
1916 * change during timeout period and queue is not empty we reset firmware.
1918 static int iwl_check_stuck_queue(struct iwl_priv
*priv
, int cnt
)
1920 struct iwl_tx_queue
*txq
= &priv
->txq
[cnt
];
1921 struct iwl_queue
*q
= &txq
->q
;
1922 unsigned long timeout
;
1925 if (q
->read_ptr
== q
->write_ptr
) {
1926 txq
->time_stamp
= jiffies
;
1930 timeout
= txq
->time_stamp
+
1931 msecs_to_jiffies(priv
->cfg
->base_params
->wd_timeout
);
1933 if (time_after(jiffies
, timeout
)) {
1934 IWL_ERR(priv
, "Queue %d stuck for %u ms.\n",
1935 q
->id
, priv
->cfg
->base_params
->wd_timeout
);
1936 ret
= iwl_force_reset(priv
, IWL_FW_RESET
, false);
1937 return (ret
== -EAGAIN
) ? 0 : 1;
/*
 * Making the watchdog tick a quarter of the timeout assures we will
 * discover a hung queue between timeout and 1.25 * timeout.
 */
#define IWL_WD_TICK(timeout) ((timeout) / 4)
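
/*
 * Example (assuming base_params->wd_timeout is 2000 ms): the watchdog
 * timer fires every IWL_WD_TICK(2000) = 500 ms, so a queue whose read and
 * write pointers stop advancing is caught between 2000 ms and 2500 ms
 * after its last time_stamp update.
 */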
1950 * Watchdog timer callback, we check each tx queue for stuck, if if hung
1951 * we reset the firmware. If everything is fine just rearm the timer.
1953 void iwl_bg_watchdog(unsigned long data
)
1955 struct iwl_priv
*priv
= (struct iwl_priv
*)data
;
1957 unsigned long timeout
;
1959 if (test_bit(STATUS_EXIT_PENDING
, &priv
->status
))
1962 timeout
= priv
->cfg
->base_params
->wd_timeout
;
1966 /* monitor and check for stuck cmd queue */
1967 if (iwl_check_stuck_queue(priv
, priv
->cmd_queue
))
1970 /* monitor and check for other stuck queues */
1971 if (iwl_is_any_associated(priv
)) {
1972 for (cnt
= 0; cnt
< priv
->hw_params
.max_txq_num
; cnt
++) {
1973 /* skip as we already checked the command queue */
1974 if (cnt
== priv
->cmd_queue
)
1976 if (iwl_check_stuck_queue(priv
, cnt
))
1981 mod_timer(&priv
->watchdog
, jiffies
+
1982 msecs_to_jiffies(IWL_WD_TICK(timeout
)));
1984 EXPORT_SYMBOL(iwl_bg_watchdog
);
1986 void iwl_setup_watchdog(struct iwl_priv
*priv
)
1988 unsigned int timeout
= priv
->cfg
->base_params
->wd_timeout
;
1991 mod_timer(&priv
->watchdog
,
1992 jiffies
+ msecs_to_jiffies(IWL_WD_TICK(timeout
)));
1994 del_timer(&priv
->watchdog
);
1996 EXPORT_SYMBOL(iwl_setup_watchdog
);
/*
 * extended beacon time format
 * time in usec will be changed into a 32-bit value in extended:internal format
 * the extended part is the beacon counts
 * the internal part is the time in usec within one beacon interval
 */
u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
{
	u32 quot;
	u32 rem;
	u32 interval = beacon_interval * TIME_UNIT;

	if (!interval || !usec)
		return 0;

	quot = (usec / interval) &
		(iwl_beacon_time_mask_high(priv,
		priv->hw_params.beacon_time_tsf_bits) >>
		priv->hw_params.beacon_time_tsf_bits);
	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
				   priv->hw_params.beacon_time_tsf_bits);

	return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
}
EXPORT_SYMBOL(iwl_usecs_to_beacons);
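
/*
 * Example (assuming beacon_time_tsf_bits = 22, as on agn devices): with a
 * beacon_interval of 100 TU (100 * TIME_UNIT = 102400 usec), usec = 250000
 * yields quot = 2 and rem = 45200, so the packed value is
 * (2 << 22) + 45200 -- two full beacon intervals in the extended part and
 * the remainder in the internal part.
 */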
/* base is usually what we get from ucode with each received frame,
 * the same as HW timer counter counting down
 */
__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
			   u32 addon, u32 beacon_interval)
{
	u32 base_low = base & iwl_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
					priv->hw_params.beacon_time_tsf_bits);
	u32 interval = beacon_interval * TIME_UNIT;
	u32 res = (base & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits)) +
		  (addon & iwl_beacon_time_mask_high(priv,
				priv->hw_params.beacon_time_tsf_bits));

	if (base_low > addon_low)
		res += base_low - addon_low;
	else if (base_low < addon_low) {
		res += interval + base_low - addon_low;
		res += (1 << priv->hw_params.beacon_time_tsf_bits);
	} else
		res += (1 << priv->hw_params.beacon_time_tsf_bits);

	return cpu_to_le32(res);
}
EXPORT_SYMBOL(iwl_add_beacon_time);
2054 int iwl_pci_suspend(struct device
*device
)
2056 struct pci_dev
*pdev
= to_pci_dev(device
);
2057 struct iwl_priv
*priv
= pci_get_drvdata(pdev
);
2060 * This function is called when system goes into suspend state
2061 * mac80211 will call iwl_mac_stop() from the mac80211 suspend function
2062 * first but since iwl_mac_stop() has no knowledge of who the caller is,
2063 * it will not call apm_ops.stop() to stop the DMA operation.
2064 * Calling apm_ops.stop here to make sure we stop the DMA.
2070 EXPORT_SYMBOL(iwl_pci_suspend
);
2072 int iwl_pci_resume(struct device
*device
)
2074 struct pci_dev
*pdev
= to_pci_dev(device
);
2075 struct iwl_priv
*priv
= pci_get_drvdata(pdev
);
2076 bool hw_rfkill
= false;
2079 * We disable the RETRY_TIMEOUT register (0x41) to keep
2080 * PCI Tx retries from interfering with C3 CPU state.
2082 pci_write_config_byte(pdev
, PCI_CFG_RETRY_TIMEOUT
, 0x00);
2084 iwl_enable_interrupts(priv
);
2086 if (!(iwl_read32(priv
, CSR_GP_CNTRL
) &
2087 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW
))
2091 set_bit(STATUS_RF_KILL_HW
, &priv
->status
);
2093 clear_bit(STATUS_RF_KILL_HW
, &priv
->status
);
2095 wiphy_rfkill_set_hw_state(priv
->hw
->wiphy
, hw_rfkill
);
2099 EXPORT_SYMBOL(iwl_pci_resume
);
const struct dev_pm_ops iwl_pm_ops = {
	.suspend = iwl_pci_suspend,
	.resume = iwl_pci_resume,
	.freeze = iwl_pci_suspend,
	.thaw = iwl_pci_resume,
	.poweroff = iwl_pci_suspend,
	.restore = iwl_pci_resume,
};
EXPORT_SYMBOL(iwl_pm_ops);

#endif /* CONFIG_PM */
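
/*
 * Usage sketch (hypothetical bus glue, for illustration only): a PCI
 * front end would hook these callbacks into its struct pci_driver, e.g.
 *	.driver.pm = &iwl_pm_ops,
 * so that suspend/resume, hibernation freeze/thaw and poweroff/restore
 * all funnel through iwl_pci_suspend()/iwl_pci_resume().
 */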