/******************************************************************************
 *
 * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#include "iwl-eeprom.h"
#include "iwl-helpers.h"
#include "iwl-agn-calib.h"
/******************************************************************************
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"

#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
static int iwlagn_ant_coupling;
static bool iwlagn_bt_ch_announce = 1;
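/*
 * Recompute the RX chain selection for every active context and, if the
 * staging RXON now differs from the active one, push it to the uCode via
 * iwlagn_commit_rxon().
 */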
void iwl_update_chain_flags(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	if (priv->cfg->ops->hcmd->set_rxon_chain) {
		for_each_context(priv, ctx) {
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
			if (ctx->active.rx_chain != ctx->staging.rx_chain)
				iwlagn_commit_rxon(priv, ctx);
		}
	}
}
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
	       (beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}
int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TX_BEACON,
	};
	u32 frame_size;
	u32 rate_flags;
	u32 rate;

	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	if (WARN_ON(!priv->beacon_skb))
		return -EINVAL;

	/* Allocate beacon command */
	if (!priv->beacon_cmd)
		priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
	tx_beacon_cmd = priv->beacon_cmd;
	if (!tx_beacon_cmd)
		return -ENOMEM;

	frame_size = priv->beacon_skb->len;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
			   frame_size);

	/* Set up packet rate and flags */
	rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
			rate_flags);

	cmd.len[0] = sizeof(*tx_beacon_cmd);
	cmd.data[0] = tx_beacon_cmd;
	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	cmd.len[1] = frame_size;
	cmd.data[1] = priv->beacon_skb->data;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_send_cmd_sync(priv, &cmd);
}
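/*
 * beacon_update work handler: pull a fresh AP beacon from mac80211 and hand
 * it to the uCode via iwlagn_send_beacon_cmd(). Beacon notifications that
 * arrive for non-AP interfaces are ignored here.
 */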
static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	mutex_lock(&priv->mutex);
	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
		goto out;
	}

	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
		/*
		 * The ucode will send beacon notifications even in
		 * IBSS mode, but we don't want to process them. But
		 * we need to defer the type check to here due to
		 * requiring locking around the beacon_ctx access.
		 */
		goto out;
	}

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
	if (!beacon) {
		IWL_ERR(priv, "update beacon failed -- keeping old\n");
		goto out;
	}

	/* new beacon skb is allocated every time; dispose previous.*/
	dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = beacon;

	iwlagn_send_beacon_cmd(priv);
 out:
	mutex_unlock(&priv->mutex);
}
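/* bt_runtime_config work handler: push the current BT coex configuration. */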
static void iwl_bg_bt_runtime_config(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_runtime_config);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;
	priv->cfg->ops->hcmd->send_bt_config(priv);
}
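/*
 * bt_full_concurrency work handler: when the BT coex mode changes, re-send
 * RXON for every context before the BT config command to avoid 3-wire
 * collisions.
 */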
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_full_concurrency);
	struct iwl_rxon_context *ctx;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/*
	 * LQ & RXON updated cmds must be sent before BT Config cmd
	 * to avoid 3-wire collisions
	 */
	for_each_context(priv, ctx) {
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
		iwlagn_commit_rxon(priv, ctx);
	}

	priv->cfg->ops->hcmd->send_bt_config(priv);
out:
	mutex_unlock(&priv->mutex);
}
/**
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC, false);
}
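/*
 * Read num_events entries of the uCode event log out of SRAM, starting at
 * start_idx, and report them through the iwlwifi_dev_ucode_cont_event
 * tracepoint.
 */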
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events,
					u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	iwl_write32(priv, HBUS_TARG_MEM_RADDR, ptr);

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		time = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(priv,
							   0, time, ev);
		} else {
			data = iwl_read32(priv, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(priv,
							   time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}
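/*
 * Poll the event log header in SRAM and trace whatever the uCode has written
 * since the last poll, covering the cases where the log has wrapped zero,
 * one, or several times.
 */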
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	base = priv->device_pointers.error_event_table;
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_read_targ_mem(priv, base);
		num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
		mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		iwl_print_cont_event_trace(priv,
				base, priv->event_log.next_entry,
				next_entry - priv->event_log.next_entry,
				mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(priv, base,
					priv->event_log.next_entry,
					capacity - priv->event_log.next_entry,
					mode);

			iwl_print_cont_event_trace(priv, base, 0,
					next_entry, mode);
		} else {
			iwl_print_cont_event_trace(priv, base,
					next_entry, capacity - next_entry,
					mode);

			iwl_print_cont_event_trace(priv, base, 0,
					next_entry, mode);
		}
	}

	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}
/**
 * iwl_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired;
 * this function performs the continuous uCode event logging operation.
 */
static void iwl_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			  jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}
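/* tx_flush work handler: flush all TX frames when the device requests it. */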
static void iwl_bg_tx_flush(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, tx_flush);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* do nothing if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
	iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		if (WARN_ON(rxb == NULL)) {
			i = (i + 1) & RX_QUEUE_MASK;
			continue;
		}

		rxq->queue[i] = NULL;

		dma_unmap_page(priv->bus.dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       DMA_FROM_DEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/*
		 * Do the notification wait before RX handlers so
		 * even if the RX handler consumes the RXB we have
		 * access to it in the notification wait entry.
		 */
		if (!list_empty(&priv->_agn.notif_waits)) {
			struct iwl_notification_wait *w;

			spin_lock(&priv->_agn.notif_wait_lock);
			list_for_each_entry(w, &priv->_agn.notif_waits, list) {
				if (w->cmd == pkt->hdr.cmd) {
					if (w->fn)
						w->fn(priv, pkt, w->fn_data);
				}
			}
			spin_unlock(&priv->_agn.notif_wait_lock);

			wake_up_all(&priv->_agn.notif_waitq);
		}

		if (priv->pre_rx_handler)
			priv->pre_rx_handler(priv, rxb);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = dma_map_page(priv->bus.dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(priv);
	else
		iwlagn_rx_queue_restock(priv);
}
/* tasklet for iwlagn interrupt */
static void iwl_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS.
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * these hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
	iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);

	inta = priv->_agn.inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* saved interrupt in inta variable now we can reset priv->_agn.inta */
	priv->_agn.inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_txq_update_write_ptr(priv, &priv->txq[i]);

		priv->isr_stats.wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
			CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(priv, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(priv, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt requires many steps to be done in the
		 * device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
		 * but the shared data changes do not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(priv, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(priv);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(priv, CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		priv->isr_stats.rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv);
}
/*****************************************************************************
 *
 *****************************************************************************/

#ifdef CONFIG_IWLWIFI_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 *
 * The debug_level being managed using sysfs below is a per device debug
 * level that is used instead of the global debug level if it (the per
 * device debug level) is set.
 */
static ssize_t show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
}
static ssize_t store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		if (iwl_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			show_debug_level, store_debug_level);

#endif /* CONFIG_IWLWIFI_DEBUG */
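/* sysfs "temperature": report the driver's current temperature reading. */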
static ssize_t show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "%d\n", priv->temperature);
}

static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);

static ssize_t show_tx_power(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_is_ready_rf(priv))
		return sprintf(buf, "off\n");
	else
		return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}

static ssize_t store_tx_power(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
	else {
		ret = iwl_set_tx_power(priv, val, false);
		if (ret)
			IWL_ERR(priv, "failed setting tx power (0x%d).\n",
				ret);
		else
			ret = count;
	}
	return ret;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);

static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl_sysfs_entries,
};
/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
{
	if (desc->v_addr)
		dma_free_coherent(priv->bus.dev, desc->len,
				  desc->v_addr, desc->p_addr);
	desc->v_addr = NULL;
	desc->len = 0;
}

static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
{
	iwl_free_fw_desc(priv, &img->code);
	iwl_free_fw_desc(priv, &img->data);
}

static void iwl_dealloc_ucode(struct iwl_priv *priv)
{
	iwl_free_fw_img(priv, &priv->ucode_rt);
	iwl_free_fw_img(priv, &priv->ucode_init);
}

static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
			     const void *data, size_t len)
{
	if (!len) {
		desc->v_addr = NULL;
		return -EINVAL;
	}

	desc->v_addr = dma_alloc_coherent(priv->bus.dev, len,
					  &desc->p_addr, GFP_KERNEL);
	if (!desc->v_addr)
		return -ENOMEM;

	desc->len = len;
	memcpy(desc->v_addr, data, len);
	return 0;
}

struct iwlagn_ucode_capabilities {
	u32 max_probe_length;
	u32 standard_phy_calibration_size;
	u32 flags;
};

static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
static int iwl_mac_setup_register(struct iwl_priv *priv,
				  struct iwlagn_ucode_capabilities *capa);

#define UCODE_EXPERIMENTAL_INDEX	100
#define UCODE_EXPERIMENTAL_TAG		"exp"
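/*
 * Ask the firmware loader for the next candidate ucode image, newest API
 * version first (or the experimental "exp" image when
 * CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE is set). iwl_ucode_callback()
 * runs once the request completes.
 */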
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
	const char *name_pre = priv->cfg->fw_name_pre;
	char tag[8];

	if (first) {
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
		priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
	} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
		priv->fw_index = priv->cfg->ucode_api_max;
		sprintf(tag, "%d", priv->fw_index);
	} else {
		priv->fw_index--;
		sprintf(tag, "%d", priv->fw_index);
	}

	if (priv->fw_index < priv->cfg->ucode_api_min) {
		IWL_ERR(priv, "no suitable firmware found!\n");
		return -ENOENT;
	}

	sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
		       (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? "EXPERIMENTAL " : "",
		       priv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
				       priv->bus.dev,
				       GFP_KERNEL, priv, iwl_ucode_callback);
}

struct iwlagn_firmware_pieces {
	const void *inst, *data, *init, *init_data;
	size_t inst_size, data_size, init_size, init_data_size;

	u32 build;

	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};
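/*
 * Parse a pre-TLV ("legacy") ucode file: read the header, verify the file
 * size against the advertised image sizes, and record where each image
 * starts.
 */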
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
				       const struct firmware *ucode_raw,
				       struct iwlagn_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
		hdr_size = 28;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->build = le32_to_cpu(ucode->u.v2.build);
		pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
		pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
		pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
		src = ucode->u.v2.data;
		break;
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->build = 0;
		pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
		src = ucode->u.v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size) {
		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;

	return 0;
}
= 1;
1101 static int iwlagn_load_firmware(struct iwl_priv
*priv
,
1102 const struct firmware
*ucode_raw
,
1103 struct iwlagn_firmware_pieces
*pieces
,
1104 struct iwlagn_ucode_capabilities
*capa
)
1106 struct iwl_tlv_ucode_header
*ucode
= (void *)ucode_raw
->data
;
1107 struct iwl_ucode_tlv
*tlv
;
1108 size_t len
= ucode_raw
->size
;
1110 int wanted_alternative
= iwlagn_wanted_ucode_alternative
, tmp
;
1113 enum iwl_ucode_tlv_type tlv_type
;
1116 if (len
< sizeof(*ucode
)) {
1117 IWL_ERR(priv
, "uCode has invalid length: %zd\n", len
);
1121 if (ucode
->magic
!= cpu_to_le32(IWL_TLV_UCODE_MAGIC
)) {
1122 IWL_ERR(priv
, "invalid uCode magic: 0X%x\n",
1123 le32_to_cpu(ucode
->magic
));
1128 * Check which alternatives are present, and "downgrade"
1129 * when the chosen alternative is not present, warning
1130 * the user when that happens. Some files may not have
1131 * any alternatives, so don't warn in that case.
1133 alternatives
= le64_to_cpu(ucode
->alternatives
);
1134 tmp
= wanted_alternative
;
1135 if (wanted_alternative
> 63)
1136 wanted_alternative
= 63;
1137 while (wanted_alternative
&& !(alternatives
& BIT(wanted_alternative
)))
1138 wanted_alternative
--;
1139 if (wanted_alternative
&& wanted_alternative
!= tmp
)
1141 "uCode alternative %d not available, choosing %d\n",
1142 tmp
, wanted_alternative
);
1144 priv
->ucode_ver
= le32_to_cpu(ucode
->ver
);
1145 pieces
->build
= le32_to_cpu(ucode
->build
);
1148 len
-= sizeof(*ucode
);
1150 while (len
>= sizeof(*tlv
)) {
1153 len
-= sizeof(*tlv
);
1156 tlv_len
= le32_to_cpu(tlv
->length
);
1157 tlv_type
= le16_to_cpu(tlv
->type
);
1158 tlv_alt
= le16_to_cpu(tlv
->alternative
);
1159 tlv_data
= tlv
->data
;
1161 if (len
< tlv_len
) {
1162 IWL_ERR(priv
, "invalid TLV len: %zd/%u\n",
1166 len
-= ALIGN(tlv_len
, 4);
1167 data
+= sizeof(*tlv
) + ALIGN(tlv_len
, 4);
1170 * Alternative 0 is always valid.
1172 * Skip alternative TLVs that are not selected.
1174 if (tlv_alt
!= 0 && tlv_alt
!= wanted_alternative
)
1178 case IWL_UCODE_TLV_INST
:
1179 pieces
->inst
= tlv_data
;
1180 pieces
->inst_size
= tlv_len
;
1182 case IWL_UCODE_TLV_DATA
:
1183 pieces
->data
= tlv_data
;
1184 pieces
->data_size
= tlv_len
;
1186 case IWL_UCODE_TLV_INIT
:
1187 pieces
->init
= tlv_data
;
1188 pieces
->init_size
= tlv_len
;
1190 case IWL_UCODE_TLV_INIT_DATA
:
1191 pieces
->init_data
= tlv_data
;
1192 pieces
->init_data_size
= tlv_len
;
1194 case IWL_UCODE_TLV_BOOT
:
1195 IWL_ERR(priv
, "Found unexpected BOOT ucode\n");
1197 case IWL_UCODE_TLV_PROBE_MAX_LEN
:
1198 if (tlv_len
!= sizeof(u32
))
1199 goto invalid_tlv_len
;
1200 capa
->max_probe_length
=
1201 le32_to_cpup((__le32
*)tlv_data
);
1203 case IWL_UCODE_TLV_PAN
:
1205 goto invalid_tlv_len
;
1206 capa
->flags
|= IWL_UCODE_TLV_FLAGS_PAN
;
1208 case IWL_UCODE_TLV_FLAGS
:
1209 /* must be at least one u32 */
1210 if (tlv_len
< sizeof(u32
))
1211 goto invalid_tlv_len
;
1212 /* and a proper number of u32s */
1213 if (tlv_len
% sizeof(u32
))
1214 goto invalid_tlv_len
;
1216 * This driver only reads the first u32 as
1217 * right now no more features are defined,
1218 * if that changes then either the driver
1219 * will not work with the new firmware, or
1220 * it'll not take advantage of new features.
1222 capa
->flags
= le32_to_cpup((__le32
*)tlv_data
);
1224 case IWL_UCODE_TLV_INIT_EVTLOG_PTR
:
1225 if (tlv_len
!= sizeof(u32
))
1226 goto invalid_tlv_len
;
1227 pieces
->init_evtlog_ptr
=
1228 le32_to_cpup((__le32
*)tlv_data
);
1230 case IWL_UCODE_TLV_INIT_EVTLOG_SIZE
:
1231 if (tlv_len
!= sizeof(u32
))
1232 goto invalid_tlv_len
;
1233 pieces
->init_evtlog_size
=
1234 le32_to_cpup((__le32
*)tlv_data
);
1236 case IWL_UCODE_TLV_INIT_ERRLOG_PTR
:
1237 if (tlv_len
!= sizeof(u32
))
1238 goto invalid_tlv_len
;
1239 pieces
->init_errlog_ptr
=
1240 le32_to_cpup((__le32
*)tlv_data
);
1242 case IWL_UCODE_TLV_RUNT_EVTLOG_PTR
:
1243 if (tlv_len
!= sizeof(u32
))
1244 goto invalid_tlv_len
;
1245 pieces
->inst_evtlog_ptr
=
1246 le32_to_cpup((__le32
*)tlv_data
);
1248 case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE
:
1249 if (tlv_len
!= sizeof(u32
))
1250 goto invalid_tlv_len
;
1251 pieces
->inst_evtlog_size
=
1252 le32_to_cpup((__le32
*)tlv_data
);
1254 case IWL_UCODE_TLV_RUNT_ERRLOG_PTR
:
1255 if (tlv_len
!= sizeof(u32
))
1256 goto invalid_tlv_len
;
1257 pieces
->inst_errlog_ptr
=
1258 le32_to_cpup((__le32
*)tlv_data
);
1260 case IWL_UCODE_TLV_ENHANCE_SENS_TBL
:
1262 goto invalid_tlv_len
;
1263 priv
->enhance_sensitivity_table
= true;
1265 case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE
:
1266 if (tlv_len
!= sizeof(u32
))
1267 goto invalid_tlv_len
;
1268 capa
->standard_phy_calibration_size
=
1269 le32_to_cpup((__le32
*)tlv_data
);
1272 IWL_DEBUG_INFO(priv
, "unknown TLV: %d\n", tlv_type
);
1278 IWL_ERR(priv
, "invalid TLV after parsing: %zd\n", len
);
1279 iwl_print_hex_dump(priv
, IWL_DL_FW
, (u8
*)data
, len
);
1286 IWL_ERR(priv
, "TLV %d has invalid size: %u\n", tlv_type
, tlv_len
);
1287 iwl_print_hex_dump(priv
, IWL_DL_FW
, tlv_data
, tlv_len
);
/**
 * iwl_ucode_callback - callback when firmware was loaded
 *
 * If loaded successfully, copies the firmware into buffers
 * for the card to fetch (via DMA).
 */
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
{
	struct iwl_priv *priv = context;
	struct iwl_ucode_header *ucode;
	int err;
	struct iwlagn_firmware_pieces pieces;
	const unsigned int api_max = priv->cfg->ucode_api_max;
	const unsigned int api_min = priv->cfg->ucode_api_min;
	u32 api_ver;
	char buildstr[25];
	u32 build;
	struct iwlagn_ucode_capabilities ucode_capa = {
		.max_probe_length = 200,
		.standard_phy_calibration_size =
			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE,
	};

	memset(&pieces, 0, sizeof(pieces));

	if (!ucode_raw) {
		if (priv->fw_index <= priv->cfg->ucode_api_max)
			IWL_ERR(priv,
				"request for firmware file '%s' failed.\n",
				priv->firmware_name);
		goto try_again;
	}

	IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
		       priv->firmware_name, ucode_raw->size);

	/* Make sure that we got at least the API version number */
	if (ucode_raw->size < 4) {
		IWL_ERR(priv, "File size way too small!\n");
		goto try_again;
	}

	/* Data from ucode file:  header followed by uCode images */
	ucode = (struct iwl_ucode_header *)ucode_raw->data;

	if (ucode->ver)
		err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
	else
		err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
					   &ucode_capa);

	if (err)
		goto try_again;

	api_ver = IWL_UCODE_API(priv->ucode_ver);
	build = pieces.build;

	/*
	 * api_ver should match the api version forming part of the
	 * firmware filename ... but we don't check for that and only rely
	 * on the API version read from firmware header from here on forward
	 */
	/* no api version check required for experimental uCode */
	if (priv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
		if (api_ver < api_min || api_ver > api_max) {
			IWL_ERR(priv,
				"Driver unable to support your firmware API. "
				"Driver supports v%u, firmware is v%u.\n",
				api_max, api_ver);
			goto try_again;
		}

		if (api_ver != api_max)
			IWL_ERR(priv,
				"Firmware has old API version. Expected v%u, "
				"got v%u. New firmware can be obtained "
				"from http://www.intellinuxwireless.org.\n",
				api_max, api_ver);
	}

	if (build)
		sprintf(buildstr, " build %u%s", build,
		       (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? " (EXP)" : "");
	else
		buildstr[0] = '\0';

	IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver),
		 buildstr);

	snprintf(priv->hw->wiphy->fw_version,
		 sizeof(priv->hw->wiphy->fw_version),
		 "%u.%u.%u.%u%s",
		 IWL_UCODE_MAJOR(priv->ucode_ver),
		 IWL_UCODE_MINOR(priv->ucode_ver),
		 IWL_UCODE_API(priv->ucode_ver),
		 IWL_UCODE_SERIAL(priv->ucode_ver),
		 buildstr);

	/*
	 * For any of the failures below (before allocating pci memory)
	 * we will try to load a version with a smaller API -- maybe the
	 * user just got a corrupted version of the latest API.
	 */

	IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
		       priv->ucode_ver);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
		       pieces.inst_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
		       pieces.data_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
		       pieces.init_size);
	IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
		       pieces.init_data_size);

	/* Verify that uCode images will fit in card's SRAM */
	if (pieces.inst_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
			pieces.inst_size);
		goto try_again;
	}

	if (pieces.data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
			pieces.data_size);
		goto try_again;
	}

	if (pieces.init_size > priv->hw_params.max_inst_size) {
		IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
			pieces.init_size);
		goto try_again;
	}

	if (pieces.init_data_size > priv->hw_params.max_data_size) {
		IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
			pieces.init_data_size);
		goto try_again;
	}

	/* Allocate ucode buffers for card's bus-master loading ... */

	/* Runtime instructions and 2 copies of data:
	 * 1) unmodified from disk
	 * 2) backup cache for save/restore during power-downs */
	if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
			      pieces.inst, pieces.inst_size))
		goto err_pci_alloc;
	if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
			      pieces.data, pieces.data_size))
		goto err_pci_alloc;

	/* Initialization instructions and data */
	if (pieces.init_size && pieces.init_data_size) {
		if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
				      pieces.init, pieces.init_size))
			goto err_pci_alloc;
		if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
				      pieces.init_data, pieces.init_data_size))
			goto err_pci_alloc;
	}

	/* Now that we can no longer fail, copy information */

	/*
	 * The (size - 16) / 12 formula is based on the information recorded
	 * for each event, which is of mode 1 (including timestamp) for all
	 * new microcodes that include this information.
	 */
	priv->_agn.init_evtlog_ptr = pieces.init_evtlog_ptr;
	if (pieces.init_evtlog_size)
		priv->_agn.init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
	else
		priv->_agn.init_evtlog_size =
			priv->cfg->base_params->max_event_log_size;
	priv->_agn.init_errlog_ptr = pieces.init_errlog_ptr;
	priv->_agn.inst_evtlog_ptr = pieces.inst_evtlog_ptr;
	if (pieces.inst_evtlog_size)
		priv->_agn.inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
	else
		priv->_agn.inst_evtlog_size =
			priv->cfg->base_params->max_event_log_size;
	priv->_agn.inst_errlog_ptr = pieces.inst_errlog_ptr;

	priv->new_scan_threshold_behaviour =
		!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);

	if ((priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE) &&
	    (ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN)) {
		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
	} else
		priv->sta_key_max_num = STA_KEY_MAX_NUM;

	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		priv->cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
	else
		priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;

	/*
	 * figure out the offset of chain noise reset and gain commands
	 * base on the size of standard phy calibration commands table size
	 */
	if (ucode_capa.standard_phy_calibration_size >
	    IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
		ucode_capa.standard_phy_calibration_size =
			IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;

	priv->_agn.phy_calib_chain_noise_reset_cmd =
		ucode_capa.standard_phy_calibration_size;
	priv->_agn.phy_calib_chain_noise_gain_cmd =
		ucode_capa.standard_phy_calibration_size + 1;

	/**************************************************
	 * This is still part of probe() in a sense...
	 *
	 * 9. Setup and register with mac80211 and debugfs
	 **************************************************/
	err = iwl_mac_setup_register(priv, &ucode_capa);
	if (err)
		goto out_unbind;

	err = iwl_dbgfs_register(priv, DRV_NAME);
	if (err)
		IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);

	err = sysfs_create_group(&(priv->bus.dev->kobj),
				 &iwl_attribute_group);
	if (err) {
		IWL_ERR(priv, "failed to create sysfs device attributes\n");
		goto out_unbind;
	}

	/* We have our copies now, allow OS to release its copies */
	release_firmware(ucode_raw);
	complete(&priv->_agn.firmware_loading_complete);
	return;

 try_again:
	/* try next, if any */
	if (iwl_request_firmware(priv, false))
		goto out_unbind;
	release_firmware(ucode_raw);
	return;

 err_pci_alloc:
	IWL_ERR(priv, "failed to allocate pci memory\n");
	iwl_dealloc_ucode(priv);
 out_unbind:
	complete(&priv->_agn.firmware_loading_complete);
	device_release_driver(priv->bus.dev);
	release_firmware(ucode_raw);
}
static const char *desc_lookup_text[] = {
	"NMI_INTERRUPT_WDG",
	"HW_ERROR_TUNE_LOCK",
	"HW_ERROR_TEMPERATURE",
	"ILLEGAL_CHAN_FREQ",
	"NMI_INTERRUPT_HOST",
	"NMI_INTERRUPT_ACTION_PT",
	"NMI_INTERRUPT_UNKNOWN",
	"UCODE_VERSION_MISMATCH",
	"HW_ERROR_ABS_LOCK",
	"HW_ERROR_CAL_LOCK_FAIL",
	"NMI_INTERRUPT_INST_ACTION_PT",
	"NMI_INTERRUPT_DATA_ACTION_PT",
	"NMI_INTERRUPT_TRM",
	"NMI_INTERRUPT_BREAK_POINT",
};

static struct { char *name; u8 num; } advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
static const char *desc_lookup(u32 num)
{
	int i;
	int max = ARRAY_SIZE(desc_lookup_text);

	if (num < max)
		return desc_lookup_text[num];

	max = ARRAY_SIZE(advanced_lookup) - 1;
	for (i = 0; i < max; i++) {
		if (advanced_lookup[i].num == num)
			break;
	}
	return advanced_lookup[i].name;
}
#define ERROR_START_OFFSET  (1 * sizeof(u32))
#define ERROR_ELEM_SIZE     (7 * sizeof(u32))

void iwl_dump_nic_error_log(struct iwl_priv *priv)
{
	u32 base;
	struct iwl_error_event_table table;

	base = priv->device_pointers.error_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		if (!base)
			base = priv->_agn.init_errlog_ptr;
	} else {
		if (!base)
			base = priv->_agn.inst_errlog_ptr;
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Not valid error log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return;
	}

	iwl_read_targ_mem_words(priv, base, &table, sizeof(table));

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		IWL_ERR(priv, "Start IWL Error Log Dump:\n");
		IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
			priv->status, table.valid);
	}

	priv->isr_stats.err_code = table.error_id;

	trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
				      table.data1, table.data2, table.line,
				      table.blink1, table.blink2, table.ilink1,
				      table.ilink2, table.bcon_time, table.gp1,
				      table.gp2, table.gp3, table.ucode_ver,
				      table.hw_ver, table.brd_ver);
	IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
		desc_lookup(table.error_id));
	IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
	IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
	IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
	IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
	IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
	IWL_ERR(priv, "0x%08X | data1\n", table.data1);
	IWL_ERR(priv, "0x%08X | data2\n", table.data2);
	IWL_ERR(priv, "0x%08X | line\n", table.line);
	IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
	IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
	IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
	IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
	IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
	IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
	IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
	IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
	IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
	IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
}
1684 * iwl_print_event_log - Dump error event log to syslog
1687 static int iwl_print_event_log(struct iwl_priv
*priv
, u32 start_idx
,
1688 u32 num_events
, u32 mode
,
1689 int pos
, char **buf
, size_t bufsz
)
1692 u32 base
; /* SRAM byte address of event log header */
1693 u32 event_size
; /* 2 u32s, or 3 u32s if timestamp recorded */
1694 u32 ptr
; /* SRAM byte address of log data */
1695 u32 ev
, time
, data
; /* event log data */
1696 unsigned long reg_flags
;
1698 if (num_events
== 0)
1701 base
= priv
->device_pointers
.log_event_table
;
1702 if (priv
->ucode_type
== IWL_UCODE_INIT
) {
1704 base
= priv
->_agn
.init_evtlog_ptr
;
1707 base
= priv
->_agn
.inst_evtlog_ptr
;
1711 event_size
= 2 * sizeof(u32
);
1713 event_size
= 3 * sizeof(u32
);
1715 ptr
= base
+ EVENT_START_OFFSET
+ (start_idx
* event_size
);
1717 /* Make sure device is powered up for SRAM reads */
1718 spin_lock_irqsave(&priv
->reg_lock
, reg_flags
);
1719 iwl_grab_nic_access(priv
);
1721 /* Set starting address; reads will auto-increment */
1722 iwl_write32(priv
, HBUS_TARG_MEM_RADDR
, ptr
);
1725 /* "time" is actually "data" for mode 0 (no timestamp).
1726 * place event id # at far right for easier visual parsing. */
1727 for (i
= 0; i
< num_events
; i
++) {
1728 ev
= iwl_read32(priv
, HBUS_TARG_MEM_RDAT
);
1729 time
= iwl_read32(priv
, HBUS_TARG_MEM_RDAT
);
1733 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
1734 "EVT_LOG:0x%08x:%04u\n",
1737 trace_iwlwifi_dev_ucode_event(priv
, 0,
1739 IWL_ERR(priv
, "EVT_LOG:0x%08x:%04u\n",
1743 data
= iwl_read32(priv
, HBUS_TARG_MEM_RDAT
);
1745 pos
+= scnprintf(*buf
+ pos
, bufsz
- pos
,
1746 "EVT_LOGT:%010u:0x%08x:%04u\n",
1749 IWL_ERR(priv
, "EVT_LOGT:%010u:0x%08x:%04u\n",
1751 trace_iwlwifi_dev_ucode_event(priv
, time
,
1757 /* Allow device to power down */
1758 iwl_release_nic_access(priv
);
1759 spin_unlock_irqrestore(&priv
->reg_lock
, reg_flags
);
/**
 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
 */
static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
				    u32 num_wraps, u32 next_entry,
				    u32 size, u32 mode,
				    int pos, char **buf, size_t bufsz)
{
	/*
	 * display the newest DEFAULT_LOG_ENTRIES entries
	 * i.e the entries just before the next one that uCode would fill.
	 */
	if (num_wraps) {
		if (next_entry < size) {
			pos = iwl_print_event_log(priv,
						capacity - (size - next_entry),
						size - next_entry, mode,
						pos, buf, bufsz);
			pos = iwl_print_event_log(priv, 0,
						  next_entry, mode,
						  pos, buf, bufsz);
		} else
			pos = iwl_print_event_log(priv, next_entry - size,
						  size, mode, pos, buf, bufsz);
	} else {
		if (next_entry < size) {
			pos = iwl_print_event_log(priv, 0, next_entry,
						  mode, pos, buf, bufsz);
		} else {
			pos = iwl_print_event_log(priv, next_entry - size,
						  size, mode, pos, buf, bufsz);
		}
	}
	return pos;
}
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)

int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
			    char **buf, bool display)
{
	u32 base;       /* SRAM byte address of event log header */
	u32 capacity;   /* event log capacity in # entries */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */
	u32 size;       /* # entries that we'll print */
	u32 logsize;
	int pos = 0;
	size_t bufsz = 0;

	base = priv->device_pointers.log_event_table;
	if (priv->ucode_type == IWL_UCODE_INIT) {
		logsize = priv->_agn.init_evtlog_size;
		if (!base)
			base = priv->_agn.init_evtlog_ptr;
	} else {
		logsize = priv->_agn.inst_evtlog_size;
		if (!base)
			base = priv->_agn.inst_evtlog_ptr;
	}

	if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		IWL_ERR(priv,
			"Invalid event log pointer 0x%08X for %s uCode\n",
			base,
			(priv->ucode_type == IWL_UCODE_INIT)
					? "Init" : "RT");
		return -EINVAL;
	}

	/* event log header */
	capacity = iwl_read_targ_mem(priv, base);
	mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
	num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
	next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));

	if (capacity > logsize) {
		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
			capacity, logsize);
		capacity = logsize;
	}

	if (next_entry > logsize) {
		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
			next_entry, logsize);
		next_entry = logsize;
	}

	size = num_wraps ? capacity : next_entry;

	/* bail out if nothing in log */
	if (size == 0) {
		IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
		return pos;
	}

	/* enable/disable bt channel inhibition */
	priv->bt_ch_announce = iwlagn_bt_ch_announce;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#else
	size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
		? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
#endif
	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
		size);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		if (full_log)
			bufsz = capacity * 48;
		else
			bufsz = size * 48;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
	}
	if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
		/*
		 * if uCode has wrapped back to top of log,
		 * start at the oldest entry,
		 * i.e the next one that uCode would fill.
		 */
		if (num_wraps)
			pos = iwl_print_event_log(priv, next_entry,
						capacity - next_entry, mode,
						pos, buf, bufsz);
		/* (then/else) start at top of log */
		pos = iwl_print_event_log(priv, 0,
					  next_entry, mode, pos, buf, bufsz);
	} else
		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
						next_entry, size, mode,
						pos, buf, bufsz);
#else
	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
					next_entry, size, mode,
					pos, buf, bufsz);
#endif
	return pos;
}
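/*
 * Program the uCode's CT-kill (critical temperature) thresholds: the
 * enter/exit form when the hardware supports ct_kill_exit, otherwise the
 * legacy single-threshold command.
 */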
static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
	struct iwl_ct_kill_config cmd;
	struct iwl_ct_kill_throttling_config adv_cmd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&priv->lock, flags);
	iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
	spin_unlock_irqrestore(&priv->lock, flags);
	priv->thermal_throttle.ct_kill_toggle = false;

	if (priv->cfg->base_params->support_ct_kill_exit) {
		adv_cmd.critical_temperature_enter =
			cpu_to_le32(priv->hw_params.ct_kill_threshold);
		adv_cmd.critical_temperature_exit =
			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);

		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
				       sizeof(adv_cmd), &adv_cmd);
		if (ret)
			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
		else
			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				       "succeeded, "
				       "critical temperature enter is %d,"
				       "exit is %d\n",
				       priv->hw_params.ct_kill_threshold,
				       priv->hw_params.ct_kill_exit_threshold);
	} else {
		cmd.critical_temperature_R =
			cpu_to_le32(priv->hw_params.ct_kill_threshold);

		ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
				       sizeof(cmd), &cmd);
		if (ret)
			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
		else
			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
				       "succeeded, "
				       "critical temperature is %d\n",
				       priv->hw_params.ct_kill_threshold);
	}
}

static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
{
	struct iwl_calib_cfg_cmd calib_cfg_cmd;
	struct iwl_host_cmd cmd = {
		.id = CALIBRATION_CFG_CMD,
		.len = { sizeof(struct iwl_calib_cfg_cmd), },
		.data = { &calib_cfg_cmd, },
	};

	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);

	return iwl_send_cmd(priv, &cmd);
}

/**
 * iwl_alive_start - called after REPLY_ALIVE notification received
 *                   from protocol/runtime uCode (initialization uCode's
 *                   Alive gets handled by iwl_init_alive_start()).
 */
int iwl_alive_start(struct iwl_priv *priv)
{
	int ret = 0;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	iwl_reset_ict(priv);

	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");

	/* After the ALIVE response, we can send host commands to the uCode */
	set_bit(STATUS_ALIVE, &priv->status);

	/* Enable watchdog to monitor the driver tx queues */
	iwl_setup_watchdog(priv);

	if (iwl_is_rfkill(priv))
		return -ERFKILL;

	/* download priority table before any calibration request */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* Configure Bluetooth device coexistence support */
		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
		priv->cfg->ops->hcmd->send_bt_config(priv);
		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
		iwlagn_send_prio_tbl(priv);

		/* FIXME: w/a to force change uCode BT state machine */
		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
		ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
		if (ret)
			return ret;
	}

	if (priv->hw_params.calib_rt_cfg)
		iwlagn_send_calib_cfg_rt(priv, priv->hw_params.calib_rt_cfg);

	ieee80211_wake_queues(priv->hw);

	priv->active_rate = IWL_RATES_MASK;

	/* Configure Tx antenna selection based on H/W config */
	if (priv->cfg->ops->hcmd->set_tx_ant)
		priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);

	if (iwl_is_associated_ctx(ctx)) {
		struct iwl_rxon_cmd *active_rxon =
				(struct iwl_rxon_cmd *)&ctx->active;
		/* apply any changes in staging */
		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	} else {
		struct iwl_rxon_context *tmp;
		/* Initialize our rx_config data */
		for_each_context(priv, tmp)
			iwl_connection_init_rx_config(priv, tmp);

		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
	}

	if (!priv->cfg->bt_params || (priv->cfg->bt_params &&
	    !priv->cfg->bt_params->advanced_bt_coexist)) {
		/*
		 * default is 2-wire BT coexistence support
		 */
		priv->cfg->ops->hcmd->send_bt_config(priv);
	}

	iwl_reset_run_time_calib(priv);

	set_bit(STATUS_READY, &priv->status);

	/* Configure the adapter for unassociated operation */
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		return ret;

	/* At this point, the NIC is initialized and operational */
	iwl_rf_kill_ct_config(priv);

	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");

	return iwl_power_update_mode(priv, true);
}
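
/*
 * Note on ctx->staging vs. ctx->active as used above: ->staging is the RXON
 * configuration the driver is building up, while ->active mirrors what the
 * device is currently running.  Changes accumulate in ->staging and only
 * take effect once iwlagn_commit_rxon() pushes them to the uCode; clearing
 * RXON_FILTER_ASSOC_MSK in the active copy while setting it in staging makes
 * the two differ, so the next commit re-applies the association state.
 */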

static void iwl_cancel_deferred_work(struct iwl_priv *priv);

static void __iwl_down(struct iwl_priv *priv)
{
	int exit_pending;

	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");

	iwl_scan_cancel_timeout(priv, 200);

	exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);

	/* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
	 * to prevent rearm timer */
	del_timer_sync(&priv->watchdog);

	iwl_clear_ucode_stations(priv, NULL);
	iwl_dealloc_bcast_stations(priv);
	iwl_clear_driver_stations(priv);

	/* reset BT coex data */
	priv->bt_status = 0;
	if (priv->cfg->bt_params)
		priv->bt_traffic_load =
			priv->cfg->bt_params->bt_init_traffic_load;
	else
		priv->bt_traffic_load = 0;
	priv->bt_full_concurrent = false;
	priv->bt_ci_compliance = 0;

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		clear_bit(STATUS_EXIT_PENDING, &priv->status);

	if (priv->mac80211_registered)
		ieee80211_stop_queues(priv->hw);

	/* Clear out all status bits but a few that are stable across reset */
	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
				STATUS_RF_KILL_HW |
			test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
				STATUS_GEO_CONFIGURED |
			test_bit(STATUS_FW_ERROR, &priv->status) <<
				STATUS_FW_ERROR |
			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
				STATUS_EXIT_PENDING;

	iwlagn_stop_device(priv);

	dev_kfree_skb(priv->beacon_skb);
	priv->beacon_skb = NULL;
}
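
/*
 * The status mask built above relies on test_bit() returning 0 or 1:
 * shifting that result back to the bit's own position rebuilds a word
 * containing only the bits meant to survive the reset.  For example, if
 * only STATUS_RF_KILL_HW was set, the whole expression reduces to
 * (1 << STATUS_RF_KILL_HW) and every other status bit is cleared.
 */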

static void iwl_down(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	__iwl_down(priv);
	mutex_unlock(&priv->mutex);

	iwl_cancel_deferred_work(priv);
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_priv *priv)
{
	int ret;

	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(priv, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_prepare_card_hw(struct iwl_priv *priv)
{
	int ret;

	IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
			   ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(priv);
	if (ret >= 0)
		return 0;
	return ret;
}
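
/*
 * The two-step handshake above first simply asks the NIC whether it is
 * ready (NIC_READY); only if that fails does it assert PREPARE, wait for
 * PREPARE_DONE to show up in CSR_HW_IF_CONFIG_REG, and then poll NIC_READY
 * one more time before giving up.
 */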

#define MAX_HW_RESTARTS 5

static int __iwl_up(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	int ret;

	lockdep_assert_held(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
		return -EIO;
	}

	for_each_context(priv, ctx) {
		ret = iwlagn_alloc_bcast_station(priv, ctx);
		if (ret) {
			iwl_dealloc_bcast_stations(priv);
			return ret;
		}
	}

	ret = iwlagn_run_init_ucode(priv);
	if (ret) {
		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwlagn_load_ucode_wait_alive(priv,
					   &priv->ucode_rt,
					   IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_alive_start(priv);
	if (ret)
		goto error;
	return 0;

 error:
	set_bit(STATUS_EXIT_PENDING, &priv->status);
	__iwl_down(priv);
	clear_bit(STATUS_EXIT_PENDING, &priv->status);

	IWL_ERR(priv, "Unable to initialize device.\n");
	return ret;
}

/*****************************************************************************
 *
 * Workqueue callbacks
 *
 *****************************************************************************/

static void iwl_bg_run_time_calib_work(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     run_time_calib_work);

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status)) {
		mutex_unlock(&priv->mutex);
		return;
	}

	if (priv->start_calib) {
		iwl_chain_noise_calibration(priv);
		iwl_sensitivity_calibration(priv);
	}

	mutex_unlock(&priv->mutex);
}

static void iwlagn_prepare_restart(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;
	bool bt_full_concurrent;
	u8 bt_ci_compliance;
	u8 bt_load;
	u8 bt_status;

	lockdep_assert_held(&priv->mutex);

	for_each_context(priv, ctx)
		ctx->vif = NULL;

	/*
	 * __iwl_down() will clear the BT status variables,
	 * which is correct, but when we restart we really
	 * want to keep them so restore them afterwards.
	 *
	 * The restart process will later pick them up and
	 * re-configure the hw when we reconfigure the BT
	 * command.
	 */
	bt_full_concurrent = priv->bt_full_concurrent;
	bt_ci_compliance = priv->bt_ci_compliance;
	bt_load = priv->bt_traffic_load;
	bt_status = priv->bt_status;

	__iwl_down(priv);

	priv->bt_full_concurrent = bt_full_concurrent;
	priv->bt_ci_compliance = bt_ci_compliance;
	priv->bt_traffic_load = bt_load;
	priv->bt_status = bt_status;
}

static void iwl_bg_restart(struct work_struct *data)
{
	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
		mutex_lock(&priv->mutex);
		iwlagn_prepare_restart(priv);
		mutex_unlock(&priv->mutex);
		iwl_cancel_deferred_work(priv);
		ieee80211_restart_hw(priv->hw);
	}
}

static void iwl_bg_rx_replenish(struct work_struct *data)
{
	struct iwl_priv *priv =
	    container_of(data, struct iwl_priv, rx_replenish);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	mutex_lock(&priv->mutex);
	iwlagn_rx_replenish(priv);
	mutex_unlock(&priv->mutex);
}

static int iwl_mac_offchannel_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
				 struct ieee80211_channel *chan,
				 enum nl80211_channel_type channel_type,
				 unsigned int wait)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	/* Not supported if we don't have PAN */
	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN))) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	/* Not supported on pre-P2P firmware */
	if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
					BIT(NL80211_IFTYPE_P2P_CLIENT))) {
		ret = -EOPNOTSUPP;
		goto free;
	}

	mutex_lock(&priv->mutex);

	if (!priv->contexts[IWL_RXON_CTX_PAN].is_active) {
		/*
		 * If the PAN context is free, use the normal
		 * way of doing remain-on-channel offload + TX.
		 */
		ret = 1;
		goto out;
	}

	/* TODO: queue up if scanning? */
	if (test_bit(STATUS_SCANNING, &priv->status) ||
	    priv->_agn.offchan_tx_skb) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * max_scan_ie_len doesn't include the blank SSID or the header,
	 * so need to add that again here.
	 */
	if (skb->len > hw->wiphy->max_scan_ie_len + 24 + 2) {
		ret = -ENOBUFS;
		goto out;
	}

	priv->_agn.offchan_tx_skb = skb;
	priv->_agn.offchan_tx_timeout = wait;
	priv->_agn.offchan_tx_chan = chan;

	ret = iwl_scan_initiate(priv, priv->contexts[IWL_RXON_CTX_PAN].vif,
				IWL_SCAN_OFFCH_TX, chan->band);
	if (ret)
		priv->_agn.offchan_tx_skb = NULL;
 out:
	mutex_unlock(&priv->mutex);
 free:
	if (ret < 0)
		kfree_skb(skb);

	return ret;
}

static int iwl_mac_offchannel_tx_cancel_wait(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	mutex_lock(&priv->mutex);

	if (!priv->_agn.offchan_tx_skb) {
		ret = -EINVAL;
		goto unlock;
	}

	priv->_agn.offchan_tx_skb = NULL;

	ret = iwl_scan_cancel_timeout(priv, 200);
	if (ret)
		ret = -EIO;
 unlock:
	mutex_unlock(&priv->mutex);

	return ret;
}

/*****************************************************************************
 *
 * mac80211 entry point functions
 *
 *****************************************************************************/

static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP),
	},
};

static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
};

static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_AP),
	},
};

static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};

static const struct ieee80211_iface_combination
iwlagn_iface_combinations_dualmode[] = {
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .beacon_int_infra_match = true,
	  .limits = iwlagn_sta_ap_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
	},
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .limits = iwlagn_2sta_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
	},
};

static const struct ieee80211_iface_combination
iwlagn_iface_combinations_p2p[] = {
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .beacon_int_infra_match = true,
	  .limits = iwlagn_p2p_sta_go_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
	},
	{ .num_different_channels = 1,
	  .max_interfaces = 2,
	  .limits = iwlagn_p2p_2sta_limits,
	  .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
	},
};
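
/*
 * These combinations are what gets advertised to cfg80211: everything runs
 * on a single channel and at most two interfaces coexist at a time, either
 * a station together with an AP/P2P-GO (with matching beacon intervals) or
 * plain station interfaces; the P2P variants additionally allow a P2P
 * client alongside a station.
 */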

/*
 * Not a mac80211 entry point function, but it fits in with all the
 * other mac80211 functions grouped here.
 */
static int iwl_mac_setup_register(struct iwl_priv *priv,
				  struct iwlagn_ucode_capabilities *capa)
{
	int ret;
	struct ieee80211_hw *hw = priv->hw;
	struct iwl_rxon_context *ctx;

	hw->rate_control_algorithm = "iwl-agn-rs";

	/* Tell mac80211 our characteristics */
	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_AMPDU_AGGREGATION |
		    IEEE80211_HW_NEED_DTIM_PERIOD |
		    IEEE80211_HW_SPECTRUM_MGMT |
		    IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	hw->flags |= IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_SUPPORTS_DYNAMIC_PS;

	if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
		hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
			     IEEE80211_HW_SUPPORTS_STATIC_SMPS;

	if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->sta_data_size = sizeof(struct iwl_station_priv);
	hw->vif_data_size = sizeof(struct iwl_vif_priv);

	for_each_context(priv, ctx) {
		hw->wiphy->interface_modes |= ctx->interface_modes;
		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
	}

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);

	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
		hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
		hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(iwlagn_iface_combinations_p2p);
	} else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
		hw->wiphy->iface_combinations =
			iwlagn_iface_combinations_dualmode;
		hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
	}

	hw->wiphy->max_remain_on_channel_duration = 1000;

	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
			    WIPHY_FLAG_DISABLE_BEACON_HINTS |
			    WIPHY_FLAG_IBSS_RSN;

	/*
	 * For now, disable PS by default because it affects
	 * RX performance significantly.
	 */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
	/* we create the 802.11 header and a zero-length SSID element */
	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;

	/* Default value; 4 EDCA QOS priorities */
	hw->queues = 4;

	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->bands[IEEE80211_BAND_2GHZ];
	if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
		priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&priv->bands[IEEE80211_BAND_5GHZ];

	iwl_leds_init(priv);

	ret = ieee80211_register_hw(priv->hw);
	if (ret) {
		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
		return ret;
	}
	priv->mac80211_registered = 1;

	return 0;
}
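
/*
 * max_scan_ie_len above is derived from the probe request size the uCode
 * can handle: the 24-byte 802.11 management header and the 2-byte
 * zero-length SSID element are built by the driver itself, so they are
 * subtracted from capa->max_probe_length before the remainder is offered
 * to mac80211 as room for supplicant-provided IEs.
 */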

static int iwlagn_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;
	int ret;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	/* we should be verifying the device is ready to be opened */
	mutex_lock(&priv->mutex);
	ret = __iwl_up(priv);
	mutex_unlock(&priv->mutex);

	if (ret)
		return ret;

	IWL_DEBUG_INFO(priv, "Start UP work done.\n");

	/* Now we should be done, and the READY bit should be set. */
	if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
		return -EIO;

	iwlagn_led_enable(priv);

	priv->is_open = 1;
	IWL_DEBUG_MAC80211(priv, "leave\n");
	return 0;
}

static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (!priv->is_open)
		return;

	priv->is_open = 0;

	iwl_down(priv);

	flush_workqueue(priv->workqueue);

	/* User space software may expect getting rfkill changes
	 * even if interface is down */
	iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
	iwl_enable_rfkill_int(priv);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}

static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct iwl_priv *priv = hw->priv;

	IWL_DEBUG_MACDUMP(priv, "enter\n");

	IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
		     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);

	if (iwlagn_tx_skb(priv, skb))
		dev_kfree_skb_any(skb);

	IWL_DEBUG_MACDUMP(priv, "leave\n");
}

static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_key_conf *keyconf,
				       struct ieee80211_sta *sta,
				       u32 iv32, u16 *phase1key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	iwl_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
			    iv32, phase1key);

	IWL_DEBUG_MAC80211(priv, "leave\n");
}

static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      struct ieee80211_key_conf *key)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	struct iwl_rxon_context *ctx = vif_priv->ctx;
	int ret;
	u8 sta_id;
	bool is_default_wep_key = false;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (iwlagn_mod_params.sw_crypto) {
		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
		return -EOPNOTSUPP;
	}

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	sta_id = iwl_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	mutex_lock(&priv->mutex);
	iwl_scan_cancel_timeout(priv, 100);

	/*
	 * If we are getting WEP group key and we didn't receive any key mapping
	 * so far, we are in legacy wep mode (group key only), otherwise we are
	 * in 1X mode.
	 * In legacy wep mode, we use another host command to the uCode.
	 */
	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
		if (cmd == SET_KEY)
			is_default_wep_key = !ctx->key_mapping_keys;
		else
			is_default_wep_key =
				(key->hw_key_idx == HW_KEY_DEFAULT);
	}

	switch (cmd) {
	case SET_KEY:
		if (is_default_wep_key)
			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
		else
			ret = iwl_set_dynamic_key(priv, vif_priv->ctx,
						  key, sta_id);

		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
		break;
	case DISABLE_KEY:
		if (is_default_wep_key)
			ret = iwl_remove_default_wep_key(priv, ctx, key);
		else
			ret = iwl_remove_dynamic_key(priv, ctx, key, sta_id);

		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");

	return ret;
}

static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum ieee80211_ampdu_mlme_action action,
				   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				   u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;
	int ret = -EINVAL;
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;

	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
		     sta->addr, tid);

	if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
		return -EACCES;

	mutex_lock(&priv->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		IWL_DEBUG_HT(priv, "start Rx\n");
		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		IWL_DEBUG_HT(priv, "stop Rx\n");
		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		break;
	case IEEE80211_AMPDU_TX_START:
		IWL_DEBUG_HT(priv, "start Tx\n");
		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
		if (ret == 0) {
			priv->_agn.agg_tids_count++;
			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
				     priv->_agn.agg_tids_count);
		}
		break;
	case IEEE80211_AMPDU_TX_STOP:
		IWL_DEBUG_HT(priv, "stop Tx\n");
		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
		if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
			priv->_agn.agg_tids_count--;
			IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
				     priv->_agn.agg_tids_count);
		}
		if (test_bit(STATUS_EXIT_PENDING, &priv->status))
			ret = 0;
		if (priv->cfg->ht_params &&
		    priv->cfg->ht_params->use_rts_for_aggregation) {
			/*
			 * switch off RTS/CTS if it was previously enabled
			 */
			sta_priv->lq_sta.lq.general_params.flags &=
				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
		}
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

		iwlagn_txq_agg_queue_setup(priv, sta, tid, buf_size);

		/*
		 * If the limit is 0, then it wasn't initialised yet,
		 * use the default. We can do that since we take the
		 * minimum below, and we don't want to go above our
		 * default due to hardware restrictions.
		 */
		if (sta_priv->max_agg_bufsize == 0)
			sta_priv->max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_DEF;

		/*
		 * Even though in theory the peer could have different
		 * aggregation reorder buffer sizes for different sessions,
		 * our ucode doesn't allow for that and has a global limit
		 * for each station. Therefore, use the minimum of all the
		 * aggregation sessions and our default value.
		 */
		sta_priv->max_agg_bufsize =
			min(sta_priv->max_agg_bufsize, buf_size);

		if (priv->cfg->ht_params &&
		    priv->cfg->ht_params->use_rts_for_aggregation) {
			/*
			 * switch to RTS/CTS if it is the prefer protection
			 * method for HT traffic
			 */
			sta_priv->lq_sta.lq.general_params.flags |=
				LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
		}

		sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
			sta_priv->max_agg_bufsize;

		iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
				&sta_priv->lq_sta.lq, CMD_ASYNC, false);

		IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
			 sta->addr, tid);
		ret = 0;
		break;
	}
	mutex_unlock(&priv->mutex);
	return ret;
}
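
/*
 * The TX_OPERATIONAL handling above keeps a single reorder-buffer limit per
 * station because the uCode only supports one global value per station: if
 * the peer negotiates, say, 64 subframes but LINK_QUAL_AGG_FRAME_LIMIT_DEF
 * is smaller, the min() picks the driver default, and that value is then
 * programmed through the link quality command.
 */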

static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct iwl_priv *priv = hw->priv;
	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
	int ret;
	u8 sta_id;

	IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
		       sta->addr);
	mutex_lock(&priv->mutex);
	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
		       sta->addr);
	sta_priv->common.sta_id = IWL_INVALID_STATION;

	atomic_set(&sta_priv->pending_frames, 0);
	if (vif->type == NL80211_IFTYPE_AP)
		sta_priv->client = true;

	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
				     is_ap, sta, &sta_id);
	if (ret) {
		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
			sta->addr, ret);
		/* Should we return success if return code is EEXIST ? */
		mutex_unlock(&priv->mutex);
		return ret;
	}

	sta_priv->common.sta_id = sta_id;

	/* Initialize rate scaling */
	IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
		       sta->addr);
	iwl_rs_rate_init(priv, sta, sta_id);
	mutex_unlock(&priv->mutex);

	return 0;
}

static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_channel_switch *ch_switch)
{
	struct iwl_priv *priv = hw->priv;
	const struct iwl_channel_info *ch_info;
	struct ieee80211_conf *conf = &hw->conf;
	struct ieee80211_channel *channel = ch_switch->channel;
	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
	/*
	 * When we add support for multiple interfaces, we need to
	 * revisit this. The channel switch command in the device
	 * only affects the BSS context, but what does that really
	 * mean? And what if we get a CSA on the second interface?
	 * This needs a lot of work.
	 */
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	u16 ch;

	IWL_DEBUG_MAC80211(priv, "enter\n");

	mutex_lock(&priv->mutex);

	if (iwl_is_rfkill(priv))
		goto out;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status) ||
	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
		goto out;

	if (!iwl_is_associated_ctx(ctx))
		goto out;

	if (!priv->cfg->ops->lib->set_channel_switch)
		goto out;

	ch = channel->hw_value;
	if (le16_to_cpu(ctx->active.channel) == ch)
		goto out;

	ch_info = iwl_get_channel_info(priv, channel->band, ch);
	if (!is_channel_valid(ch_info)) {
		IWL_DEBUG_MAC80211(priv, "invalid channel\n");
		goto out;
	}

	spin_lock_irq(&priv->lock);

	priv->current_ht_config.smps = conf->smps_mode;

	/* Configure HT40 channels */
	ctx->ht.enabled = conf_is_ht(conf);
	if (ctx->ht.enabled) {
		if (conf_is_ht40_minus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_BELOW;
			ctx->ht.is_40mhz = true;
		} else if (conf_is_ht40_plus(conf)) {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
			ctx->ht.is_40mhz = true;
		} else {
			ctx->ht.extension_chan_offset =
				IEEE80211_HT_PARAM_CHA_SEC_NONE;
			ctx->ht.is_40mhz = false;
		}
	} else
		ctx->ht.is_40mhz = false;

	if ((le16_to_cpu(ctx->staging.channel) != ch))
		ctx->staging.flags = 0;

	iwl_set_rxon_channel(priv, channel, ctx);
	iwl_set_rxon_ht(priv, ht_conf);
	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);

	spin_unlock_irq(&priv->lock);

	/*
	 * at this point, staging_rxon has the
	 * configuration for channel switch
	 */
	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
	priv->switch_channel = cpu_to_le16(ch);
	if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
		priv->switch_channel = 0;
		ieee80211_chswitch_done(ctx->vif, false);
	}

out:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}

static void iwlagn_configure_filter(struct ieee80211_hw *hw,
				    unsigned int changed_flags,
				    unsigned int *total_flags,
				    u64 multicast)
{
	struct iwl_priv *priv = hw->priv;
	__le32 filter_or = 0, filter_nand = 0;
	struct iwl_rxon_context *ctx;

#define CHK(test, flag)	do { \
	if (*total_flags & (test))		\
		filter_or |= (flag);		\
	else					\
		filter_nand |= (flag);		\
	} while (0)

	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
			   changed_flags, *total_flags);

	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);

#undef CHK

	mutex_lock(&priv->mutex);

	for_each_context(priv, ctx) {
		ctx->staging.filter_flags &= ~filter_nand;
		ctx->staging.filter_flags |= filter_or;

		/*
		 * Not committing directly because hardware can perform a scan,
		 * but we'll eventually commit the filter flags change anyway.
		 */
	}

	mutex_unlock(&priv->mutex);

	/*
	 * Receiving all multicast frames is always enabled by the
	 * default flags setup in iwl_connection_init_rx_config()
	 * since we currently do not support programming multicast
	 * filters into the device.
	 */
	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
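
/*
 * Example of how the CHK() macro above folds a mac80211 filter flag into
 * the RXON filter words: with FIF_CONTROL requested, the control-frame
 * forwarding bits are OR-ed into filter_or; without it, the same bits land
 * in filter_nand and are later cleared from every context's staging RXON.
 */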

static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
	struct iwl_priv *priv = hw->priv;

	mutex_lock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "enter\n");

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
		goto done;
	}
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
		goto done;
	}

	/*
	 * mac80211 will not push any more frames for transmit
	 * until the flush is completed
	 */
	if (drop) {
		IWL_DEBUG_MAC80211(priv, "send flush command\n");
		if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
			IWL_ERR(priv, "flush request fail\n");
			goto done;
		}
	}
	IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	mutex_unlock(&priv->mutex);
	IWL_DEBUG_MAC80211(priv, "leave\n");
}

static void iwlagn_disable_roc(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
	struct ieee80211_channel *chan = ACCESS_ONCE(priv->hw->conf.channel);

	lockdep_assert_held(&priv->mutex);

	if (!ctx->is_active)
		return;

	ctx->staging.dev_type = RXON_DEV_TYPE_2STA;
	ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
	iwl_set_rxon_channel(priv, chan, ctx);
	iwl_set_flags_for_band(priv, ctx, chan->band, NULL);

	priv->_agn.hw_roc_channel = NULL;

	iwlagn_commit_rxon(priv, ctx);

	ctx->is_active = false;
}

static void iwlagn_bg_roc_done(struct work_struct *work)
{
	struct iwl_priv *priv = container_of(work, struct iwl_priv,
					     _agn.hw_roc_work.work);

	mutex_lock(&priv->mutex);
	ieee80211_remain_on_channel_expired(priv->hw);
	iwlagn_disable_roc(priv);
	mutex_unlock(&priv->mutex);
}

static int iwl_mac_remain_on_channel(struct ieee80211_hw *hw,
				     struct ieee80211_channel *channel,
				     enum nl80211_channel_type channel_type,
				     int duration)
{
	struct iwl_priv *priv = hw->priv;
	int err = 0;

	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
		return -EOPNOTSUPP;

	if (!(priv->contexts[IWL_RXON_CTX_PAN].interface_modes &
					BIT(NL80211_IFTYPE_P2P_CLIENT)))
		return -EOPNOTSUPP;

	mutex_lock(&priv->mutex);

	if (priv->contexts[IWL_RXON_CTX_PAN].is_active ||
	    test_bit(STATUS_SCAN_HW, &priv->status)) {
		err = -EBUSY;
		goto out;
	}

	priv->contexts[IWL_RXON_CTX_PAN].is_active = true;
	priv->_agn.hw_roc_channel = channel;
	priv->_agn.hw_roc_chantype = channel_type;
	priv->_agn.hw_roc_duration = DIV_ROUND_UP(duration * 1000, 1024);
	iwlagn_commit_rxon(priv, &priv->contexts[IWL_RXON_CTX_PAN]);
	queue_delayed_work(priv->workqueue, &priv->_agn.hw_roc_work,
			   msecs_to_jiffies(duration + 20));

	msleep(IWL_MIN_SLOT_TIME); /* TU is almost ms */
	ieee80211_ready_on_channel(priv->hw);

 out:
	mutex_unlock(&priv->mutex);

	return err;
}
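
/*
 * The duration handed to us is in milliseconds, while hw_roc_duration is
 * programmed in TU (1 TU = 1024 usec), hence DIV_ROUND_UP(duration * 1000,
 * 1024) above: a 200 ms request, for instance, becomes 196 TU.  The delayed
 * work that ends the session is scheduled slightly later (duration + 20 ms)
 * to be on the safe side.
 */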

static int iwl_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct iwl_priv *priv = hw->priv;

	if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
		return -EOPNOTSUPP;

	cancel_delayed_work_sync(&priv->_agn.hw_roc_work);

	mutex_lock(&priv->mutex);
	iwlagn_disable_roc(priv);
	mutex_unlock(&priv->mutex);

	return 0;
}

/*****************************************************************************
 *
 * driver setup and teardown
 *
 *****************************************************************************/

static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
	priv->workqueue = create_singlethread_workqueue(DRV_NAME);

	init_waitqueue_head(&priv->wait_command_queue);

	INIT_WORK(&priv->restart, iwl_bg_restart);
	INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
	INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
	INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
	INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
	INIT_DELAYED_WORK(&priv->_agn.hw_roc_work, iwlagn_bg_roc_done);

	iwl_setup_scan_deferred_work(priv);

	if (priv->cfg->ops->lib->setup_deferred_work)
		priv->cfg->ops->lib->setup_deferred_work(priv);

	init_timer(&priv->statistics_periodic);
	priv->statistics_periodic.data = (unsigned long)priv;
	priv->statistics_periodic.function = iwl_bg_statistics_periodic;

	init_timer(&priv->ucode_trace);
	priv->ucode_trace.data = (unsigned long)priv;
	priv->ucode_trace.function = iwl_bg_ucode_trace;

	init_timer(&priv->watchdog);
	priv->watchdog.data = (unsigned long)priv;
	priv->watchdog.function = iwl_bg_watchdog;

	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
		iwl_irq_tasklet, (unsigned long)priv);
}

static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
	if (priv->cfg->ops->lib->cancel_deferred_work)
		priv->cfg->ops->lib->cancel_deferred_work(priv);

	cancel_work_sync(&priv->run_time_calib_work);
	cancel_work_sync(&priv->beacon_update);

	iwl_cancel_scan_deferred_work(priv);

	cancel_work_sync(&priv->bt_full_concurrency);
	cancel_work_sync(&priv->bt_runtime_config);

	del_timer_sync(&priv->statistics_periodic);
	del_timer_sync(&priv->ucode_trace);
}

static void iwl_init_hw_rates(struct iwl_priv *priv,
			      struct ieee80211_rate *rates)
{
	int i;

	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
		rates[i].bitrate = iwl_rates[i].ieee * 5;
		rates[i].hw_value = i; /* Rate scaling will work on indexes */
		rates[i].hw_value_short = i;
		rates[i].flags = 0;
		if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
			/*
			 * If CCK != 1M then set short preamble rate flag.
			 */
			rates[i].flags |=
				(iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
					0 : IEEE80211_RATE_SHORT_PREAMBLE;
		}
	}
}
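
/*
 * iwl_rates[].ieee holds the rate in the 500 kbps units used by 802.11
 * Supported Rates elements, while mac80211's bitrate field is in 100 kbps
 * units, hence the "* 5" above (54 Mbps: ieee value 108 -> bitrate 540).
 */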

static int iwl_init_drv(struct iwl_priv *priv)
{
	int ret;

	spin_lock_init(&priv->sta_lock);
	spin_lock_init(&priv->hcmd_lock);

	mutex_init(&priv->mutex);

	priv->ieee_channels = NULL;
	priv->ieee_rates = NULL;
	priv->band = IEEE80211_BAND_2GHZ;

	priv->iw_mode = NL80211_IFTYPE_STATION;
	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
	priv->_agn.agg_tids_count = 0;

	/* initialize force reset */
	priv->force_reset[IWL_RF_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_RF_RESET;
	priv->force_reset[IWL_FW_RESET].reset_duration =
		IWL_DELAY_NEXT_FORCE_FW_RELOAD;

	priv->rx_statistics_jiffies = jiffies;

	/* Choose which receivers/antennas to use */
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv,
					&priv->contexts[IWL_RXON_CTX_BSS]);

	iwl_init_scan_params(priv);

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
		priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
		priv->bt_duration = BT_DURATION_LIMIT_DEF;
		priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
	}

	ret = iwl_init_channel_map(priv);
	if (ret) {
		IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
		goto err;
	}

	ret = iwlcore_init_geos(priv);
	if (ret) {
		IWL_ERR(priv, "initializing geos failed: %d\n", ret);
		goto err_free_channel_map;
	}
	iwl_init_hw_rates(priv, priv->ieee_rates);

	return 0;

err_free_channel_map:
	iwl_free_channel_map(priv);
err:
	return ret;
}

static void iwl_uninit_drv(struct iwl_priv *priv)
{
	iwl_calib_free_results(priv);
	iwlcore_free_geos(priv);
	iwl_free_channel_map(priv);
	kfree(priv->scan_cmd);
	kfree(priv->beacon_cmd);
}

struct ieee80211_ops iwlagn_hw_ops = {
	.tx = iwlagn_mac_tx,
	.start = iwlagn_mac_start,
	.stop = iwlagn_mac_stop,
	.add_interface = iwl_mac_add_interface,
	.remove_interface = iwl_mac_remove_interface,
	.change_interface = iwl_mac_change_interface,
	.config = iwlagn_mac_config,
	.configure_filter = iwlagn_configure_filter,
	.set_key = iwlagn_mac_set_key,
	.update_tkip_key = iwlagn_mac_update_tkip_key,
	.conf_tx = iwl_mac_conf_tx,
	.bss_info_changed = iwlagn_bss_info_changed,
	.ampdu_action = iwlagn_mac_ampdu_action,
	.hw_scan = iwl_mac_hw_scan,
	.sta_notify = iwlagn_mac_sta_notify,
	.sta_add = iwlagn_mac_sta_add,
	.sta_remove = iwl_mac_sta_remove,
	.channel_switch = iwlagn_mac_channel_switch,
	.flush = iwlagn_mac_flush,
	.tx_last_beacon = iwl_mac_tx_last_beacon,
	.remain_on_channel = iwl_mac_remain_on_channel,
	.cancel_remain_on_channel = iwl_mac_cancel_remain_on_channel,
	.offchannel_tx = iwl_mac_offchannel_tx,
	.offchannel_tx_cancel_wait = iwl_mac_offchannel_tx_cancel_wait,
	CFG80211_TESTMODE_CMD(iwl_testmode_cmd)
	CFG80211_TESTMODE_DUMP(iwl_testmode_dump)
};

static u32 iwl_hw_detect(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_HW_REV);
}

static int iwl_set_hw_params(struct iwl_priv *priv)
{
	priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
	priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
	if (iwlagn_mod_params.amsdu_size_8K)
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
	else
		priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);

	priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;

	if (iwlagn_mod_params.disable_11n)
		priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;

	/* Device-specific setup */
	return priv->cfg->ops->lib->set_hw_params(priv);
}
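
/*
 * rx_page_order is what the RX replenish path feeds to the page allocator:
 * with amsdu_size_8K set, get_order(IWL_RX_BUF_SIZE_8K) yields order-1 (two
 * pages) on 4K-page systems, otherwise a single order-0 page is used per
 * receive buffer.
 */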

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};

static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};

static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};

static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};
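
/*
 * The index into these tables is the mac80211 access category (0 = VO,
 * 1 = VI, 2 = BE, 3 = BK), so the BSS context maps each AC onto its own TX
 * FIFO and one block of HW queues, while the PAN context uses the IPAN
 * FIFOs and a separate block of queues.
 */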

/* This function both allocates and initializes hw and priv. */
static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
{
	struct iwl_priv *priv;
	/* mac80211 allocates memory for this device instance, including
	 * space for this driver's private structure */
	struct ieee80211_hw *hw;

	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
	if (hw == NULL) {
		pr_err("%s: Can not allocate network device\n",
		       cfg->name);
		goto out;
	}

	priv = hw->priv;
	priv->hw = hw;

out:
	return hw;
}

static void iwl_init_context(struct iwl_priv *priv)
{
	int i;

	/*
	 * The default context is always valid,
	 * more may be discovered when firmware
	 * is loaded.
	 */
	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);

	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
		priv->contexts[i].ctxid = i;

	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
	priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwlagn_bss_ac_to_fifo;
	priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwlagn_bss_ac_to_queue;
	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
		BIT(NL80211_IFTYPE_ADHOC);
	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
		BIT(NL80211_IFTYPE_STATION);
	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;

	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
		REPLY_WIPAN_RXON_TIMING;
	priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
		REPLY_WIPAN_RXON_ASSOC;
	priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
	priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
	priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
	priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
	priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
	priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo = iwlagn_pan_ac_to_fifo;
	priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue = iwlagn_pan_ac_to_queue;
	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
	priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
		BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
#ifdef CONFIG_IWL_P2P
	priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
#endif
	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;

	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}

int iwl_probe(void *bus_specific, struct iwl_bus_ops *bus_ops,
		struct iwl_cfg *cfg)
{
	int err = 0;
	struct iwl_priv *priv;
	struct ieee80211_hw *hw;
	u16 num_mac;
	u32 hw_rev;

	/************************
	 * 1. Allocating HW data
	 ************************/
	hw = iwl_alloc_all(cfg);
	if (!hw) {
		err = -ENOMEM;
		goto out;
	}

	priv = hw->priv;
	priv->bus.priv = priv;
	priv->bus.bus_specific = bus_specific;
	priv->bus.ops = bus_ops;
	priv->bus.ops->set_drv_data(&priv->bus, priv);
	priv->bus.dev = priv->bus.ops->get_dev(&priv->bus);

	/* At this point both hw and priv are allocated. */

	SET_IEEE80211_DEV(hw, priv->bus.dev);

	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
	priv->cfg = cfg;
	priv->inta_mask = CSR_INI_SET_MASK;

	/* is antenna coupling more than 35dB ? */
	priv->bt_ant_couple_ok =
		(iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
		true : false;

	/* enable/disable bt channel inhibition */
	priv->bt_ch_announce = iwlagn_bt_ch_announce;
	IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
		       (priv->bt_ch_announce) ? "On" : "Off");

	if (iwl_alloc_traffic_mem(priv))
		IWL_ERR(priv, "Not enough memory to generate traffic log\n");

	/* these spin locks will be used in apm_ops.init and EEPROM access
	 * we should init now
	 */
	spin_lock_init(&priv->reg_lock);
	spin_lock_init(&priv->lock);

	/*
	 * stop and reset the on-board processor just in case it is in a
	 * strange state ... like being left stranded by a primary kernel
	 * and this is now the kdump kernel trying to start up
	 */
	iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/***********************
	 * 3. Read REV register
	 ***********************/
	hw_rev = iwl_hw_detect(priv);
	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
		 priv->cfg->name, hw_rev);

	if (iwl_prepare_card_hw(priv)) {
		err = -EIO;
		IWL_WARN(priv, "Failed, HW not ready\n");
		goto out_free_traffic_mem;
	}

	/*****************
	 * 4. Read EEPROM
	 *****************/
	/* Read the EEPROM */
	err = iwl_eeprom_init(priv, hw_rev);
	if (err) {
		IWL_ERR(priv, "Unable to init EEPROM\n");
		goto out_free_traffic_mem;
	}
	err = iwl_eeprom_check_version(priv);
	if (err)
		goto out_free_eeprom;

	err = iwl_eeprom_check_sku(priv);
	if (err)
		goto out_free_eeprom;

	/* extract MAC Address */
	iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
	priv->hw->wiphy->addresses = priv->addresses;
	priv->hw->wiphy->n_addresses = 1;
	num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
	if (num_mac > 1) {
		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
		       ETH_ALEN);
		priv->addresses[1].addr[5]++;
		priv->hw->wiphy->n_addresses++;
	}

	/* initialize all valid contexts */
	iwl_init_context(priv);

	/************************
	 * 5. Setup HW constants
	 ************************/
	if (iwl_set_hw_params(priv)) {
		err = -ENOENT;
		IWL_ERR(priv, "failed to set hw parameters\n");
		goto out_free_eeprom;
	}

	/*******************
	 * 6. Setup priv
	 *******************/

	err = iwl_init_drv(priv);
	if (err)
		goto out_free_eeprom;
	/* At this point both hw and priv are initialized. */

	/********************
	 * 7. Setup services
	 ********************/
	iwl_alloc_isr_ict(priv);

	err = request_irq(priv->bus.ops->get_irq(&priv->bus), iwl_isr_ict,
			  IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IWL_ERR(priv, "Error allocating IRQ %d\n",
			priv->bus.ops->get_irq(&priv->bus));
		goto out_uninit_drv;
	}

	iwl_setup_deferred_work(priv);
	iwl_setup_rx_handlers(priv);
	iwl_testmode_init(priv);

	/*********************************************
	 * 8. Enable interrupts
	 *********************************************/

	iwl_enable_rfkill_int(priv);

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		set_bit(STATUS_RF_KILL_HW, &priv->status);

	wiphy_rfkill_set_hw_state(priv->hw->wiphy,
		test_bit(STATUS_RF_KILL_HW, &priv->status));

	iwl_power_initialize(priv);
	iwl_tt_initialize(priv);

	init_completion(&priv->_agn.firmware_loading_complete);

	err = iwl_request_firmware(priv, true);
	if (err)
		goto out_destroy_workqueue;

	return 0;

 out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	free_irq(priv->bus.ops->get_irq(&priv->bus), priv);
	iwl_free_isr_ict(priv);
 out_uninit_drv:
	iwl_uninit_drv(priv);
 out_free_eeprom:
	iwl_eeprom_free(priv);
 out_free_traffic_mem:
	iwl_free_traffic_mem(priv);
	ieee80211_free_hw(priv->hw);
 out:
	return err;
}

void __devexit iwl_remove(struct iwl_priv *priv)
{
	unsigned long flags;

	wait_for_completion(&priv->_agn.firmware_loading_complete);

	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");

	iwl_dbgfs_unregister(priv);
	sysfs_remove_group(&priv->bus.dev->kobj,
			   &iwl_attribute_group);

	/* The ieee80211_unregister_hw call will cause iwl_mac_stop and then
	 * iwl_down to be called; since we are removing the device we need
	 * to set the STATUS_EXIT_PENDING bit.
	 */
	set_bit(STATUS_EXIT_PENDING, &priv->status);

	iwl_testmode_cleanup(priv);
	iwl_leds_exit(priv);

	if (priv->mac80211_registered) {
		ieee80211_unregister_hw(priv->hw);
		priv->mac80211_registered = 0;
	}

	/* Reset to low power before unloading driver. */
	iwl_apm_stop(priv);

	/* make sure we flush any pending irq or
	 * tasklet for the driver
	 */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	iwl_synchronize_irq(priv);

	iwl_dealloc_ucode(priv);

	iwlagn_rx_queue_free(priv, &priv->rxq);
	iwlagn_hw_txq_ctx_free(priv);

	iwl_eeprom_free(priv);

	/*netif_stop_queue(dev); */
	flush_workqueue(priv->workqueue);

	/* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
	 * priv->workqueue... so we can't take down the workqueue
	 * until now. */
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
	iwl_free_traffic_mem(priv);

	free_irq(priv->bus.ops->get_irq(&priv->bus), priv);
	priv->bus.ops->set_drv_data(&priv->bus, NULL);

	iwl_uninit_drv(priv);

	iwl_free_isr_ict(priv);

	dev_kfree_skb(priv->beacon_skb);

	ieee80211_free_hw(priv->hw);
}

/*****************************************************************************
 *
 * driver and module entry point
 *
 *****************************************************************************/
static int __init iwl_init(void)
{
	int ret;

	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
	pr_info(DRV_COPYRIGHT "\n");

	ret = iwlagn_rate_control_register();
	if (ret) {
		pr_err("Unable to register rate control algorithm: %d\n", ret);
		return ret;
	}

	ret = iwl_pci_register_driver();
	if (ret)
		goto error_register;
	return ret;

error_register:
	iwlagn_rate_control_unregister();
	return ret;
}

static void __exit iwl_exit(void)
{
	iwl_pci_unregister_driver();
	iwlagn_rate_control_unregister();
}

module_exit(iwl_exit);
module_init(iwl_init);

#ifdef CONFIG_IWLWIFI_DEBUG
module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "debug output mask");
#endif

module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
		   int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");

module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
		   S_IRUGO);
MODULE_PARM_DESC(ucode_alternative,
		 "specify ucode alternative to use from ucode file");

module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
MODULE_PARM_DESC(antenna_coupling,
		 "specify antenna coupling in dB (default: 0 dB)");

module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
MODULE_PARM_DESC(bt_ch_inhibition,
		 "Disable BT channel inhibition (default: enable)");

module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");

module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");

/*
 * If bt_coex_active is set to true (the default), the uCode will do
 * kill/defer every time the priority line is asserted (i.e. whenever BT is
 * sending signals on the priority line in the PCIx).
 * If bt_coex_active is set to false, the uCode ignores BT activity and
 * performs normal operation.
 *
 * Users might experience transmit issues on some platforms due to this
 * WiFi/BT coexistence problem.  The possible symptoms are being able to
 * scan and find all available APs, but not being able to associate with
 * any AP.
 * On those platforms, WiFi communication can be restored by setting the
 * "bt_coex_active" module parameter to "false".
 *
 * default: bt_coex_active = true (BT_COEX_ENABLE)
 */
module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
		   bool, S_IRUGO);
MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");

module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
		 "1=On(RF On)/Off(RF Off), 2=blinking (default: 0)");

/*
 * For now, keep using power level 1 instead of automatically adjusting the
 * sleep level according to network latency.
 */
module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
		   bool, S_IRUGO);
MODULE_PARM_DESC(no_sleep_autoadjust,
		 "don't automatically adjust sleep level "
		 "according to maximum network latency (default: true)");