// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 *
 * Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <net/mac80211.h>

#include "iwl-agn-hw.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"

#include "dev.h"
#include "agn.h"
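
/* Return true when "addr" lies within the uCode RTC data region. */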
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
	       (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}
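
/*
 * Send the user-requested TX power limit to the uCode, clamped to the
 * device maximum from NVM (both values are handled in half-dBm units).
 */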
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
		/*
		 * For the newer devices which use the enhanced/extended tx
		 * power table in EEPROM, the format is in half dBm. The
		 * driver needs to convert to dBm before reporting to
		 * mac80211, which can lose 1/2 dBm of resolution. The driver
		 * rounds up before reporting, which can push the tx power
		 * 1/2 dBm over the regulatory limit. Check here: if
		 * "tx_power_user_lmt" is higher than the EEPROM value (in
		 * half-dBm format), lower the tx power based on EEPROM.
		 */
		tx_power_cmd.global_lmt =
			priv->nvm_data->max_tx_pwr_half_dbm;
	}

	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
				    sizeof(tx_power_cmd), &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
{
	lockdep_assert_held(&priv->statistics.lock);

	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
}
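
/*
 * Convert a hardware rate_n_flags value into a mac80211 rate index:
 * the MCS number for HT rates, or the legacy rate table index otherwise.
 */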
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
	int idx;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == NL80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}
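
/* Add or remove the uCode station entry used for the IBSS BSSID. */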
int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}
/*
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * pre-requirements:
 *  1. acquire mutex before calling
 *  2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
{
	struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
		.flush_control = cpu_to_le16(IWL_DROP_ALL),
	};
	struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
		.flush_control = cpu_to_le16(IWL_DROP_ALL),
	};

	u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
			    IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;

	if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
		queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
				 IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
				 IWL_PAN_SCD_MGMT_MSK |
				 IWL_PAN_SCD_MULTICAST_MSK;

	if (priv->nvm_data->sku_cap_11n_enable)
		queue_control |= IWL_AGG_TX_QUEUE_MSK;

	if (scd_q_msk)
		queue_control = scd_q_msk;

	IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
	flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
	flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);

	if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
		return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
					    sizeof(flush_cmd_v3),
					    &flush_cmd_v3);
	return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
				    sizeof(flush_cmd_v2), &flush_cmd_v2);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (iwlagn_txfifo_flush(priv, 0)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}
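
/*
 * BT coexistence decision look-up tables, copied into the BT config
 * command sent to the uCode.
 */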
static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};
/* Full concurrency */
static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};
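
/*
 * Build and send the REPLY_BT_CONFIG command describing the coex mode,
 * kill masks and decision look-up table to the uCode.
 */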
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl_bt_cmd_v1 bt_cmd_v1;
	struct iwl_bt_cmd_v2 bt_cmd_v2;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
		     sizeof(basic.bt3_lookup_table));

	if (priv->lib->bt_params) {
		/*
		 * Newer generations of devices (2000 series and newer)
		 * use version 2 of the bt command; make sure the host
		 * command is sent with the correct data structure to
		 * avoid a uCode assert.
		 */
		if (priv->lib->bt_params->bt_session_2) {
			bt_cmd_v2.prio_boost = cpu_to_le32(
				priv->lib->bt_params->bt_prio_boost);
			bt_cmd_v2.tx_prio_boost = 0;
			bt_cmd_v2.rx_prio_boost = 0;
		} else {
			/* older version only has 8 bits */
			WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
			bt_cmd_v1.prio_boost =
				priv->lib->bt_params->bt_prio_boost;
			bt_cmd_v1.tx_prio_boost = 0;
			bt_cmd_v1.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	/*
	 * When BT needs to take over reception while the STA has to
	 * respond to the AP's frame(s), reduce the tx power of the
	 * required response frames. This allows concurrent BT receive
	 * and WiFi transmit (BT - ANT A, WiFi - ANT B) without
	 * interference to one another.
	 *
	 * Reduced tx power applies to control frames only (ACK/Back/CTS)
	 * when indicated by the BT config command.
	 */
	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	if (priv->reduced_txpower)
		basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlwifi_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
			      IWLAGN_BT_FLAG_COEX_MODE_SHIFT;

		if (!priv->bt_enable_pspoll)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
		else
			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
		       sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
		       sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (priv->lib->bt_params->bt_session_2) {
		memcpy(&bt_cmd_v2.basic, &basic, sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
					   0, sizeof(bt_cmd_v2), &bt_cmd_v2);
	} else {
		memcpy(&bt_cmd_v1.basic, &basic, sizeof(basic));
		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
					   0, sizeof(bt_cmd_v1), &bt_cmd_v1);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}
void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
{
	struct iwl_rxon_context *ctx, *found_ctx = NULL;
	bool found_ap = false;

	lockdep_assert_held(&priv->mutex);

	/* Check whether AP or GO mode is active. */
	if (rssi_ena) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
			    iwl_is_associated_ctx(ctx)) {
				found_ap = true;
				break;
			}
		}
	}

	/*
	 * If disable was received or if in GO/AP mode, disable RSSI
	 * measurements.
	 */
	if (!rssi_ena || found_ap) {
		if (priv->cur_rssi_ctx) {
			ctx = priv->cur_rssi_ctx;
			ieee80211_disable_rssi_reports(ctx->vif);
			priv->cur_rssi_ctx = NULL;
		}
		return;
	}

	/*
	 * If rssi measurements need to be enabled, consider all cases now.
	 * Figure out how many contexts are active.
	 */
	for_each_context(priv, ctx) {
		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
		    iwl_is_associated_ctx(ctx)) {
			found_ctx = ctx;
			break;
		}
	}

	/*
	 * rssi monitor already enabled for the correct interface...
	 * nothing to do.
	 */
	if (found_ctx == priv->cur_rssi_ctx)
		return;

	/*
	 * Figure out if rssi monitor is currently enabled, and needs
	 * to be changed. If rssi monitor is already enabled, disable
	 * it first else just enable rssi measurements on the
	 * interface found above.
	 */
	if (priv->cur_rssi_ctx) {
		ctx = priv->cur_rssi_ctx;
		if (ctx->vif)
			ieee80211_disable_rssi_reports(ctx->vif);
	}

	priv->cur_rssi_ctx = found_ctx;

	if (!found_ctx)
		return;

	ieee80211_enable_rssi_reports(found_ctx->vif,
				      IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
				      IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
}
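
/* True when the last BT UART message reported SCO/eSCO traffic. */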
static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
{
	return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
		BT_UART_MSG_FRAME3SCOESCO_POS;
}
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since only bad consequence
	 * can be not matching debug print with actual state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We cannot send commands to the firmware while scanning; when the
	 * scan completes we will schedule this work again. We check with the
	 * mutex locked to prevent a new scan request from arriving. We do not
	 * check STATUS_SCANNING, to avoid a race when queue_work() is called
	 * twice from different notifications but we would quit without
	 * performing any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	iwl_update_chain_flags(priv);

	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	/*
	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
	 * necessary.
	 */
	iwlagn_bt_coex_rssi_monitor(priv);
out:
	mutex_unlock(&priv->mutex);
}
/*
 * If there is BT SCO traffic and the RSSI monitor is enabled, move the
 * measurements to the correct interface, or disable the monitor if this
 * is the last interface to be removed.
 */
void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
{
	if (priv->bt_is_sco &&
	    priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
		iwlagn_bt_adjust_rssi_monitor(priv, true);
	else
		iwlagn_bt_adjust_rssi_monitor(priv, false);
}
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X\n",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X\n",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X\n",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}
static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	u8 kill_msk = IWL_BT_KILL_REDUCE;
	static const __le32 bt_kill_ack_msg[3] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
	static const __le32 bt_kill_cts_msg[3] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};

	if (!priv->reduced_txpower)
		kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
			? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
		need_update = true;
	}

	return need_update;
}
/*
 * Upon RSSI changes, send a bt config command with the following changes:
 *  1. enable/disable "reduced control frames tx power"
 *  2. update the "kill_ack_mask" and "kill_cts_mask"
 *
 * If "reduced tx power" is enabled, the uCode shall
 *  1. reduce the ACK/Back/CTS rate to 6Mbps
 *  2. not use duplicate 20/40MHz mode
 */
static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
				     struct iwl_bt_uart_msg *uart_msg)
{
	bool need_update = false;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	int ave_rssi;

	if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
		IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
		return false;
	}

	ave_rssi = ieee80211_ave_rssi(ctx->vif);
	if (!ave_rssi) {
		/* no rssi data, no changes to reduce tx power */
		IWL_DEBUG_COEX(priv, "no rssi data available\n");
		return need_update;
	}

	if (!priv->reduced_txpower &&
	    !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
	    (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
	     BT_UART_MSG_FRAME3OBEX_MSK)) &&
	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
	      BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
		/* enabling reduced tx power */
		priv->reduced_txpower = true;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	} else if (priv->reduced_txpower &&
		   (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
		    (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
		    (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
		     BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
		    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
		      BT_UART_MSG_FRAME3OBEX_MSK)))) {
		/* disable reduced tx power */
		priv->reduced_txpower = false;
		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
		need_update = true;
	}

	return need_update;
}
static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
					 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
		       coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	/* schedule to send runtime bt_config */
	/* check reduce power before change ack/cts kill mask */
	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
	    iwlagn_set_kill_msk(priv, uart_msg))
		queue_work(priv->workqueue, &priv->bt_runtime_config);

	/* FIXME: based on notification, adjust the prio_boost */

	priv->bt_ci_compliance = coex->bt_ci_compliance;
}
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}
void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}
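
/* One RX chain suffices under static SMPS or when single_chain_sufficient is set. */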
static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}
#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1
/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity. Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	return IWL_NUM_RX_CHAINS_MULTIPLE;
}
/*
 * When we are in power saving mode, unless the device supports spatial
 * multiplexing power save, use the active count for the rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;

	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}
/*
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->nvm_data->valid_rx_ant;

	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according to hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}
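
/*
 * Return the next valid TX antenna after "ant", or "ant" itself if no
 * other antenna is valid; on 2.4 GHz under high BT traffic load, always
 * return antenna index 0.
 */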
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == NL80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}
#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}
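
/* State shared with iwlagn_wowlan_program_keys() while iterating keys. */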
struct wowlan_key_data {
	struct iwl_rxon_context *ctx;
	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwlagn_wowlan_tkip_params_cmd *tkip;
	const u8 *bssid;
	bool error, use_rsc_tsc, use_tkip;
};
static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key,
				       void *_data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct wowlan_key_data *data = _data;
	struct iwl_rxon_context *ctx = data->ctx;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwlagn_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWLAGN_P1K_SIZE];
	int ret, i;

	mutex_lock(&priv->mutex);

	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    !sta && !ctx->key_mapping_keys)
		ret = iwl_set_default_wep_key(priv, ctx, key);
	else
		ret = iwl_set_dynamic_key(priv, ctx, key, sta);

	if (ret) {
		IWL_ERR(priv, "Error setting key during suspend!\n");
		data->error = true;
	}

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			u64 pn64;

			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			pn64 = atomic64_read(&key->tx_pn);
			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWLAGN_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, data->bssid,
					  cur_rx_iv32 + 1, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWLAGN_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			u64 pn64;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			pn64 = atomic64_read(&key->tx_pn);
			aes_tx_sc->pn = cpu_to_le64(pn64);
		} else {
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
						   ((u64)pn[4] << 8) |
						   ((u64)pn[3] << 16) |
						   ((u64)pn[2] << 24) |
						   ((u64)pn[1] << 32) |
						   ((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	mutex_unlock(&priv->mutex);
}
int iwlagn_send_patterns(struct iwl_priv *priv,
			 struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
		       wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
		       wowlan->patterns[i].pattern,
		       wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_dvm_send_cmd(priv, &cmd);
	kfree(pattern_cmd);
	return err;
}
int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
	struct iwl_rxon_cmd rxon;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwlagn_d3_config_cmd d3_cfg_cmd = {
		/*
		 * Program the minimum sleep time to 10 seconds, as many
		 * platforms have issues processing a wakeup signal while
		 * still being in the process of suspending.
		 */
		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
	};
	struct wowlan_key_data key_data = {
		.ctx = ctx,
		.bssid = ctx->active.bssid_addr,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	u16 seq;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));

	/*
	 * We know the last used seqno, and the uCode expects to know that
	 * one, it will increment before TX.
	 */
	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 before using the value.
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
		seq -= 0x10;
		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	if (wowlan->disconnect)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		d3_cfg_cmd.wakeup_flags |=
			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);

	iwl_scan_cancel_timeout(priv, 200);

	memcpy(&rxon, &ctx->active, sizeof(rxon));

	priv->ucode_loaded = false;
	iwl_trans_stop_device(priv->trans);
	ret = iwl_trans_start_hw(priv->trans);
	if (ret)
		goto out;

	priv->wowlan = true;

	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
	if (ret)
		goto out;

	/* now configure WoWLAN ucode */
	ret = iwl_alive_start(priv);
	if (ret)
		goto out;

	memcpy(&ctx->staging, &rxon, sizeof(rxon));
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		goto out;

	ret = iwl_power_update_mode(priv, true);
	if (ret)
		goto out;

	if (!iwlwifi_mod_params.swcrypto) {
		/* mark all keys clear */
		priv->ucode_key_table = 0;
		ctx->key_mapping_keys = 0;

		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&priv->mutex);
		ieee80211_iter_keys(priv->hw, ctx->vif,
				    iwlagn_wowlan_program_keys,
				    &key_data);
		mutex_lock(&priv->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(*key_data.rsc_tsc),
			};

			ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_TKIP_PARAMS,
						   0, sizeof(tkip_cmd),
						   &tkip_cmd);
			if (ret)
				goto out;
		}

		if (priv->have_rekey_data) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = priv->replay_ctr;

			ret = iwl_dvm_send_cmd_pdu(priv,
						   REPLY_WOWLAN_KEK_KCK_MATERIAL,
						   0, sizeof(kek_kck_cmd),
						   &kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
				   sizeof(d3_cfg_cmd), &d3_cfg_cmd);
	if (ret)
		goto out;

	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
				   0, sizeof(wakeup_filter_cmd),
				   &wakeup_filter_cmd);
	if (ret)
		goto out;

	ret = iwlagn_send_patterns(priv, wowlan);
out:
	kfree(key_data.rsc_tsc);
	return ret;
}
#endif /* CONFIG_PM_SLEEP */
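
/*
 * Send a host command through the transport layer; refuse while in
 * RF-kill/CT-kill, after a firmware error, or when no uCode is loaded.
 */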
int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
		IWL_WARN(priv, "Not sending command - %s KILL\n",
			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}

	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
		IWL_ERR(priv, "Command %s failed: FW Error\n",
			iwl_get_cmd_string(priv->trans, cmd->id));
		return -EIO;
	}

	/*
	 * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
	 * in iwl_down but cancel the workers only later.
	 */
	if (!priv->ucode_loaded) {
		IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
		return -EIO;
	}

	/*
	 * Synchronous commands from this op-mode must hold
	 * the mutex, this ensures we don't try to send two
	 * (or more) synchronous commands at a time.
	 */
	if (!(cmd->flags & CMD_ASYNC))
		lockdep_assert_held(&priv->mutex);

	return iwl_trans_send_cmd(priv->trans, cmd);
}
int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
			 u32 flags, u16 len, const void *data)
{
	struct iwl_host_cmd cmd = {
		.id = id,
		.len = { len, },
		.data = { data, },
		.flags = flags,
	};

	return iwl_dvm_send_cmd(priv, &cmd);
}