/******************************************************************************
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
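/*
 * A note on the helper above: each agg_tx_status entry (a u16 status plus
 * a u16 sequence) is exactly one __le32 wide, so stepping frame_count
 * 32-bit words past &tx_resp->status skips the per-frame entries and lands
 * on the scheduler sequence number that follows them. For example, with
 * frame_count == 3 the SSN is read from the fourth __le32 of the array.
 */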
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->_agn.reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->_agn.reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->_agn.reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->_agn.reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->_agn.reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->_agn.reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->_agn.reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->_agn.reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->_agn.reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->_agn.reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->_agn.reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->_agn.reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->_agn.reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->_agn.reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->_agn.reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->_agn.reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->_agn.reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->_agn.reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->_agn.reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->_agn.reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->_agn.reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->_agn.reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->_agn.reply_tx_stats.unknown++;
		break;
	}
}
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->_agn.reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->_agn.reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->_agn.reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->_agn.reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->_agn.reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->_agn.reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->_agn.reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->_agn.reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->_agn.reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->_agn.reply_agg_tx_stats.unknown++;
		break;
	}
}
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwl_rxon_context *ctx,
				 struct iwlagn_tx_resp *tx_resp,
				 int txq_id, bool is_agg)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);

	if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
	    iwl_is_associated_ctx(ctx) && ctx->vif &&
	    ctx->vif->type == NL80211_IFTYPE_STATION) {
		ctx->last_tx_rejected = true;
		iwl_stop_queue(priv, &priv->txq[txq_id]);
	}

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
			   "0x%x retries %d\n",
			   txq_id,
			   iwl_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				     struct iwl_ht_agg *agg,
				     struct iwlagn_tx_resp *tx_resp,
				     int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_hdr *hdr = NULL;
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		struct iwl_tx_info *txb;

		/* Only one frame was attempted; no block-ack will arrive */
		idx = start_idx;

		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);
		txb = &priv->txq[txq_id].txb[idx];
		iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
				     txb->ctx, tx_resp, txq_id, true);
		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		/*
		 * Start is the lowest frame sent. It may not be the first
		 * frame in the batch; we figure this out dynamically during
		 * the following loop.
		 */
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;

			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & AGG_TX_STATUS_MSK)
				iwlagn_count_agg_tx_err_status(priv, status);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);
			IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
					   "try-count (0x%08x)\n",
					   iwl_get_agg_tx_fail_reason(status),
					   status & AGG_TX_STATUS_MSK,
					   status & AGG_TX_TRY_MSK);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/*
			 * sh -> how many frames ahead of the starting frame is
			 * the current one?
			 *
			 * Note that all frames sent in the batch must be in a
			 * 64-frame window, so this number should be in [0,63].
			 * If outside of this window, then we've found a new
			 * "first" frame in the batch and need to change start.
			 */
			sh = idx - start;

			/*
			 * If >= 64, out of window. start must be at the front
			 * of the circular buffer, idx must be near the end of
			 * the buffer, and idx is the new "first" frame. Shift
			 * the indices around.
			 */
			if (sh >= 64) {
				/* Shift bitmap by start - idx, wrapped */
				sh = 0x100 - idx + start;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				sh = 0;
				start = idx;
			/*
			 * If <= -64 then wraps the 256-pkt circular buffer
			 * (e.g., start = 255 and idx = 0, sh should be 1)
			 */
			} else if (sh <= -64) {
				sh = 0x100 - start + idx;
			/*
			 * If < 0 but > -64, out of window. idx is before start
			 * but not wrapped. Shift the indices around.
			 */
			} else if (sh < 0) {
				/* Shift by how far start is ahead of idx */
				sh = start - idx;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				start = idx;
				sh = 0;
			}
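
			/*
			 * Worked example of the window handling above
			 * (illustrative only): with start = 250 and idx = 2,
			 * sh = idx - start = -248 <= -64, so the batch
			 * wrapped the 256-entry ring and the wrapped offset
			 * is sh = 0x100 - 250 + 2 = 8, i.e. this frame is 8
			 * positions ahead of the starting frame.
			 */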
			/* Sequence number start + sh was sent in this batch */
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		/*
		 * Store the bitmap and possibly the new start, if we wrapped
		 * the buffer above
		 */
		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
			       struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct iwl_tx_info *txb;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;
	unsigned long flags;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			"is out of range [0-%d] %d %d\n", txq_id,
			index, txq->q.n_bd, txq->q.write_ptr,
			txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	txb = &txq->txb[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(txb->skb);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
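
	/*
	 * Illustrative note on the decoding above: ra_tid packs the station
	 * index and TID into one byte, recovered by masking with the
	 * IWLAGN_TX_RES_*_MSK constants and shifting by the matching *_POS.
	 * Assuming the TID sits in the low nibble and the station index in
	 * the high nibble, ra_tid == 0x25 would decode to sta_id 2, tid 5.
	 */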
	spin_lock_irqsave(&priv->sta_lock, flags);
	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg;

		agg = &priv->stations[sta_id].tid[tid].agg;
		/*
		 * If the BT kill count is non-zero, we'll get this
		 * notification again.
		 */
		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
		    priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist) {
			IWL_WARN(priv, "receive reply tx with bt_kill\n");
		}
		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn, index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
				iwl_wake_queue(priv, txq);
		}
	} else {
		iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
				     txq_id, false);
		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    iwl_queue_space(&txq->q) > txq->q.low_mark &&
		    status != TX_STATUS_FAIL_PASSIVE_NO_RX)
			iwl_wake_queue(priv, txq);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->sta_lock, flags);
}
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;

	/* set up notification wait support */
	spin_lock_init(&priv->_agn.notif_wait_lock);
	INIT_LIST_HEAD(&priv->_agn.notif_waits);
	init_waitqueue_head(&priv->_agn.notif_waitq);
}
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/*
	 * Nothing needs to be done here anymore; keep the hook
	 * for future use if needed.
	 */
}
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For newer devices using the enhanced/extended tx power
		 * table in EEPROM, the format is in half dBm, and the driver
		 * needs to convert it to dBm before reporting to mac80211.
		 * The conversion can lose 1/2 dBm of resolution: the driver
		 * rounds up before reporting, which could push the reported
		 * tx power 1/2 dBm over the regulatory limit. Check here:
		 * if "tx_power_user_lmt" is higher than the EEPROM value
		 * (in half-dBm format), lower the tx power based on EEPROM.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
				&tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		__le16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;
}
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
		break;
	case INDIRECT_TXP_LIMIT:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
		break;
	case INDIRECT_TXP_LIMIT_SIZE:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
			address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to byte */
	return (address & ADDRESS_MSK) + (offset << 1);
}
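
/*
 * Illustrative example of the translation above: if the link cell for
 * INDIRECT_GENERAL holds the word offset 0x40, an indirect address whose
 * ADDRESS_MSK bits are 0x10 resolves to byte offset 0x10 + (0x40 << 1) =
 * 0x90 into the EEPROM image.
 */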
const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
				   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
	return &priv->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwlagn_set_pwr_vmain(priv);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwlagn_rx_queue_reset(priv, rxq);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwlagn_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwlagn_txq_ctx_reset(priv);

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					     dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
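
/*
 * A note on the conversion above: receive buffers are 256-byte aligned and
 * their DMA addresses fit in 36 bits (both enforced in iwlagn_rx_allocate
 * below), so dropping the low 8 bits makes the pointer fit a 32-bit RBD
 * word. For example, DMA address 0x123456700 becomes RBD value 0x1234567.
 */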
/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except for during initialization)
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
 * This free routine walks the list of POOL entries and if SKB is set to
 * non NULL it is unmapped and freed.
 */
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->bd_dma);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts = NULL;
}
int iwlagn_rxq_stop(struct iwl_priv *priv)
{
	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}
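
/*
 * Illustrative example for the lookup above: an HT rate_n_flags with
 * RATE_MCS_HT_MSK set and low byte 0x07 simply yields MCS 7, while a
 * legacy value whose low byte matches iwl_rates[4].plcp returns index 4
 * on 2.4 GHz (band_offset 0) but 4 - IWL_FIRST_OFDM_RATE on 5 GHz, where
 * the table search starts at the first OFDM entry.
 */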
static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
					   struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added = 0;
	u16 channel = 0;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return added;
	}

	active_dwell = iwl_get_active_dwell_time(priv, band, 0);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	channel = iwl_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		added++;
	} else
		IWL_ERR(priv, "no valid channel found\n");
	return added;
}
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;
		scan_ch->channel = cpu_to_le16(channel);

		ch_info = iwl_get_channel_info(priv, band, channel);
		if (!is_channel_valid(ch_info)) {
			IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
					channel);
			continue;
		}

		if (!is_active || is_channel_passive(ch_info) ||
		    (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		else
			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;

		if (n_probes)
			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);

		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));

		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
			       channel, le32_to_cpu(scan_ch->type),
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
				"ACTIVE" : "PASSIVE",
			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
			       active_dwell : passive_dwell);

		scan_ch++;
		added++;
	}

	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
	return added;
}
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
{
	struct sk_buff *skb = priv->_agn.offchan_tx_skb;

	if (skb->len < maxlen)
		maxlen = skb->len;

	memcpy(data, skb->data, maxlen);

	return maxlen;
}
int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = { sizeof(struct iwl_scan_cmd), },
	};
	struct iwl_scan_cmd *scan;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;
	u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (vif)
		ctx = iwl_rxon_ctx_from_vif(vif);

	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			return -ENOMEM;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	if (priv->scan_type != IWL_SCAN_OFFCH_TX &&
	    iwl_is_any_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		switch (priv->scan_type) {
		case IWL_SCAN_OFFCH_TX:
			WARN_ON(1);
			break;
		case IWL_SCAN_RADIO_RESET:
			interval = 0;
			break;
		case IWL_SCAN_NORMAL:
			interval = vif->bss_conf.beacon_int;
			break;
		}

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
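
		/*
		 * Worked example of the encoding above (illustrative):
		 * with beacon interval 100 TU and suspend_time 100, the
		 * quotient 100 / 100 = 1 lands in bits 22 and up, the
		 * remainder 0 is scaled by 1024 into the low bits, and
		 * scan_suspend_time becomes 0x400000.
		 */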
	} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
		scan->suspend_time = 0;
		scan->max_out_time =
			cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
	}

	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
		break;
	case IWL_SCAN_NORMAL:
		if (priv->scan_request->n_ssids) {
			int i, p = 0;
			IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
			for (i = 0; i < priv->scan_request->n_ssids; i++) {
				/* always does wildcard anyway */
				if (!priv->scan_request->ssids[i].ssid_len)
					continue;
				scan->direct_scan[p].id = WLAN_EID_SSID;
				scan->direct_scan[p].len =
					priv->scan_request->ssids[i].ssid_len;
				memcpy(scan->direct_scan[p].ssid,
				       priv->scan_request->ssids[i].ssid,
				       priv->scan_request->ssids[i].ssid_len);
				n_probes++;
				p++;
			}
			is_active = true;
		} else
			IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
		break;
	case IWL_SCAN_OFFCH_TX:
		IWL_DEBUG_SCAN(priv, "Start offchannel TX scan.\n");
		break;
	}

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
						RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		/*
		 * Internal scans are passive, so we can indiscriminately set
		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
		 */
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist)
			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 *
	 * This was fixed in later versions along with some other
	 * scan changes, and the threshold behaves as a flag in those
	 * versions.
	 */
	if (priv->new_scan_threshold_behaviour)
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_DISABLED;
	else
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (band == IEEE80211_BAND_2GHZ &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* transmit 2.4 GHz probes only on first antenna */
		scan_tx_antennas = first_antenna(scan_tx_antennas);
	}

	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		rx_ant = first_antenna(rx_ant);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	switch (priv->scan_type) {
	case IWL_SCAN_NORMAL:
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
		break;
	case IWL_SCAN_RADIO_RESET:
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwl_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
		break;
	case IWL_SCAN_OFFCH_TX:
		cmd_len = iwl_fill_offch_tx(priv, scan->data,
					    IWL_MAX_SCAN_SIZE
					     - sizeof(*scan)
					     - sizeof(struct iwl_scan_channel));
		scan->scan_flags |= IWL_SCAN_FLAGS_ACTION_FRAME_TX;
		break;
	default:
		BUG();
	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	switch (priv->scan_type) {
	case IWL_SCAN_RADIO_RESET:
		scan->channel_count =
			iwl_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[cmd_len]);
		break;
	case IWL_SCAN_NORMAL:
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
						  is_active, n_probes,
						  (void *)&scan->data[cmd_len]);
		break;
	case IWL_SCAN_OFFCH_TX: {
		struct iwl_scan_channel *scan_ch;

		scan->channel_count = 1;

		scan_ch = (void *)&scan->data[cmd_len];
		scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
		scan_ch->channel =
			cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
		scan_ch->active_dwell =
			cpu_to_le16(priv->_agn.offchan_tx_timeout);
		scan_ch->passive_dwell = 0;

		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;

		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
		 * power level:
		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
		 */
		if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		}
		break;
	}

	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data[0] = scan;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	scan->len = cpu_to_le16(cmd.len[0]);

	/* set scan bit here for PAN params */
	set_bit(STATUS_SCAN_HW, &priv->status);

	if (priv->cfg->ops->hcmd->set_pan_params) {
		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
		if (ret)
			return ret;
	}

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret) {
		clear_bit(STATUS_SCAN_HW, &priv->status);
		if (priv->cfg->ops->hcmd->set_pan_params)
			priv->cfg->ops->hcmd->set_pan_params(priv);
	}

	return ret;
}
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}
#define IWL_FLUSH_WAIT_MS	2000

int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		if (cnt == priv->cmd_queue)
			continue;
		txq = &priv->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(priv, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}
#define IWL_TX_QUEUE_MSK	0xfffff

/**
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * pre-requirements:
 *  1. acquire mutex before calling
 *  2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	struct iwl_txfifo_flush_cmd flush_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TXFIFO_FLUSH,
		.len = { sizeof(struct iwl_txfifo_flush_cmd), },
		.flags = CMD_SYNC,
		.data = { &flush_cmd, },
	};

	might_sleep();

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
				 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
	if (priv->cfg->sku & IWL_SKU_N)
		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;

	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
		       flush_cmd.fifo_control);
	flush_cmd.flush_control = cpu_to_le16(flush_control);

	return iwl_send_cmd(priv, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}
/*
 * Macros to access the lookup table.
 *
 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
 *
 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
 *
 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
 * one after another in 32-bit registers, and "registers" 0 through 7 contain
 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
 *
 * These macros encode that format.
 */
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
		  wifi_txrx, wifi_sh_ant_req) \
	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
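
/*
 * Worked example (illustrative): LUT_VALUE(1, 0, 1, 0, 0, 0, 0) packs
 * bt3_prio into bit 0 and bt_rf_act into bit 2, giving index 5.  The
 * WLAN_ACTIVE output bit for that input then lives in lut[8 + (5 >> 5)]
 * = lut[8], at bit (5 & 0x1f) = 5, per LUT_PTA_WLAN_ACTIVE_OP below.
 */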
#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
				   bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
				   wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
				  wifi_req, wifi_prio, wifi_txrx, \
				  wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))

#define LUT_WLAN_KILL_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			     wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

#define LUT_ANT_SWITCH_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			      wifi_req, wifi_prio, wifi_txrx, \
			      wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
static const __le32 iwlagn_def_3w_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};

static const __le32 iwlagn_concurrent_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl6000_bt_cmd bt_cmd_6000;
	struct iwl2000_bt_cmd bt_cmd_2000;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (priv->cfg->bt_params) {
		if (priv->cfg->bt_params->bt_session_2) {
			bt_cmd_2000.prio_boost = cpu_to_le32(
				priv->cfg->bt_params->bt_prio_boost);
			bt_cmd_2000.tx_prio_boost = 0;
			bt_cmd_2000.rx_prio_boost = 0;
		} else {
			bt_cmd_6000.prio_boost =
				priv->cfg->bt_params->bt_prio_boost;
			bt_cmd_6000.tx_prio_boost = 0;
			bt_cmd_6000.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->bt_sco_disable)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (priv->cfg->bt_params->bt_session_2) {
		memcpy(&bt_cmd_2000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_2000), &bt_cmd_2000);
	} else {
		memcpy(&bt_cmd_6000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_6000), &bt_cmd_6000);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that, since the only bad
	 * consequence is a debug print that does not match the actual
	 * state.
	 */
	IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We cannot send a command to the firmware while scanning. When
	 * the scan completes, this work will be scheduled again. We check
	 * with the mutex held to prevent new scan requests from arriving.
	 * We do not check STATUS_SCANNING, to avoid the race where
	 * queue_work is called twice from different notifications but
	 * then quits without performing any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	if (priv->cfg->ops->lib->update_chain_flags)
		priv->cfg->ops->lib->update_chain_flags(priv);

	if (smps_request != -1) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}
out:
	mutex_unlock(&priv->mutex);
}
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
static void iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_msk;
	static const __le32 bt_kill_ack_msg[2] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
	static const __le32 bt_kill_cts_msg[2] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };

	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
		? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];

		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}
void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
	IWL_DEBUG_NOTIF(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_NOTIF(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_NOTIF(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	iwlagn_set_kill_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost value */
	spin_lock_irqsave(&priv->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->lock, flags);
}
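
/*
 * Note: the handler above only reacts to *changes* in BT status or
 * traffic load, IBSS interfaces skip the traffic-load bookkeeping
 * entirely, and when channel announcement is disabled any active BT
 * link is treated pessimistically as IWL_BT_COEX_TRAFFIC_LOAD_HIGH.
 */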
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	iwlagn_rx_handler_setup(priv);
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	iwlagn_setup_deferred_work(priv);

	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}

void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
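
/*
 * The three *_bt_* hooks above wrap the plain iwlagn variants and add
 * only the BT-coex pieces.  A BT-capable device config would plug them
 * in instead of the defaults, e.g. (illustrative sketch, assuming the
 * usual iwl_lib_ops hook names):
 *
 *	.rx_handler_setup	= iwlagn_bt_rx_handler_setup,
 *	.setup_deferred_work	= iwlagn_bt_setup_deferred_work,
 *	.cancel_deferred_work	= iwlagn_bt_cancel_deferred_work,
 */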
static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1
/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput; the idle chain count in powersave is
 * reduced separately (see iwl_get_idle_rx_chain_count()).
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}

	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;

	return IWL_NUM_RX_CHAINS_MULTIPLE;
}
/*
 * When we are in power saving mode, unless the device supports spatial
 * multiplexing power save, use the active count for the rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}
/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;

	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}
/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for the scan command ... it puts data in the
 * wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according to hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
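
/*
 * Worked example (values illustrative): with chains A+B valid (bitmap
 * 0x3), an active count of 3 and SMPS off (idle count 3), both counts
 * are first capped to the 2 valid chains, giving
 *
 *	rx_chain = 0x3 << RXON_RX_CHAIN_VALID_POS
 *		 | 2   << RXON_RX_CHAIN_MIMO_CNT_POS
 *		 | 2   << RXON_RX_CHAIN_CNT_POS;
 *
 * before the MIMO-force bit is applied on top.
 */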
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == IEEE80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}
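
/*
 * Illustrative walk-through: with ant == 0 and valid == BIT(1) | BIT(2),
 * the first loop pass advances ind to 1, finds BIT(1) set in valid and
 * returns antenna 1; if no other antenna were valid, the original ant
 * would be returned unchanged.  On 2.4 GHz under high BT traffic load
 * the toggle is skipped and antenna 0 is forced.
 */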
static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}
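
/*
 * IWL_CMD is the driver's stringify helper; it expands to a case label
 * returning the symbol name, essentially:
 *
 *	#define IWL_CMD(x) case x: return #x
 *
 * so get_csr_string(CSR_HW_REV) returns "CSR_HW_REV", and unknown
 * registers fall through to "UNKNOWN".
 */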
void iwl_dump_csr(struct iwl_priv *priv)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(priv, "CSR values:\n");
	IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(priv, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(priv, csr_tbl[i]));
	}
}
static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}
int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 "  %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}
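
/*
 * Buffer sizing: each entry prints as "  %34s: 0X%08x\n", i.e. at most
 * around 49 bytes, and the header line is 20 bytes, so nine entries fit
 * comfortably in ARRAY_SIZE(fh_tbl) * 48 + 40 bytes; scnprintf() clamps
 * to the remaining space in any case, so the dump cannot overrun.
 */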
/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
				   struct iwl_notification_wait *wait_entry,
				   u8 cmd,
				   void (*fn)(struct iwl_priv *priv,
					      struct iwl_rx_packet *pkt,
					      void *data),
				   void *fn_data)
{
	wait_entry->fn = fn;
	wait_entry->fn_data = fn_data;
	wait_entry->cmd = cmd;
	wait_entry->triggered = false;
	wait_entry->aborted = false;

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_add(&wait_entry->list, &priv->_agn.notif_waits);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}
int iwlagn_wait_notification(struct iwl_priv *priv,
			     struct iwl_notification_wait *wait_entry,
			     unsigned long timeout)
{
	int ret;

	ret = wait_event_timeout(priv->_agn.notif_waitq,
				 wait_entry->triggered || wait_entry->aborted,
				 timeout);

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);

	if (wait_entry->aborted)
		return -EIO;

	/* return value is always >= 0 */
	if (ret <= 0)
		return -ETIMEDOUT;
	return 0;
}
void iwlagn_remove_notification(struct iwl_priv *priv,
				struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}
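
/*
 * Typical usage (illustrative sketch, error handling elided): register
 * the wait *before* sending the command so the notification cannot be
 * missed, then either block for it or remove the entry on send failure:
 *
 *	struct iwl_notification_wait calib_wait;
 *
 *	iwlagn_init_notification_wait(priv, &calib_wait,
 *				      CALIBRATION_COMPLETE_NOTIFICATION,
 *				      NULL, NULL);
 *	ret = iwl_send_cmd(priv, &cmd);
 *	if (ret)
 *		iwlagn_remove_notification(priv, &calib_wait);
 *	else
 *		ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
 */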
int iwlagn_start_device(struct iwl_priv *priv)
{
	int ret;

	if (iwl_prepare_card_hw(priv)) {
		IWL_WARN(priv, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) &
	    CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	if (iwl_is_rfkill(priv)) {
		wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
		iwl_enable_interrupts(priv);
		return -ERFKILL;
	}

	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);

	ret = iwlagn_hw_nic_init(priv);
	if (ret) {
		IWL_ERR(priv, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(priv);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}
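
/*
 * The bring-up order above is deliberate: rfkill state is sampled before
 * NIC init so a hard-killed radio bails out early (with interrupts left
 * enabled to catch the rfkill toggle), and the uCode rfkill handshake
 * bits are cleared both before and after enabling host interrupts so a
 * stale software-kill request cannot block the just-loaded firmware.
 */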
void iwlagn_stop_device(struct iwl_priv *priv)
{
	unsigned long flags;

	/* stop and reset the on-board processor */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
	iwl_synchronize_irq(priv);

	/* device going down, Stop using ICT table */
	iwl_disable_ict(priv);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already stopped.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
		iwlagn_txq_ctx_stop(priv);
		iwlagn_rxq_stop(priv);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(priv, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(priv, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */