/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/fs.h>
#include <net/cfg80211.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
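
/*
 * Cache the GTK rekey material (KEK, KCK and replay counter) that mac80211
 * hands us, so it can be uploaded to the WoWLAN firmware at suspend time
 * (see the KEK/KCK handling in iwl_mvm_wowlan_config_key_params() below).
 */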
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct cfg80211_gtk_rekey_data *data)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (iwlwifi_mod_params.swcrypto)
		return;

	mutex_lock(&mvm->mutex);

	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
	mvmvif->rekey_data.replay_ctr =
		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
	mvmvif->rekey_data.valid = true;

	mutex_unlock(&mvm->mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct inet6_dev *idev)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct inet6_ifaddr *ifa;
	int idx = 0;

	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
		if (ifa->flags & IFA_F_TENTATIVE)
			__set_bit(idx, mvmvif->tentative_addrs);
		idx++;
		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
			break;
	}
	read_unlock_bh(&idev->lock);

	mvmvif->num_target_ipv6_addrs = idx;
}
#endif
void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif, int idx)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->tx_key_idx = idx;
}

static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWL_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}
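
/*
 * Pick the highest CCMP PN for the given TID across the default queue
 * (as reported by mac80211) and the per-queue counters we track
 * internally, so that replay detection resumes from the most advanced
 * value after suspend.
 */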
static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
				     struct iwl_mvm_key_pn *ptk_pn,
				     struct ieee80211_key_seq *seq,
				     int tid, int queues)
{
	const u8 *ret = seq->ccmp.pn;
	int i;

	/* get the PN from mac80211, used on the default queue */
	ieee80211_get_key_rx_seq(key, tid, seq);

	/* and use the internal data for the other queues */
	for (i = 1; i < queues; i++) {
		const u8 *tmp = ptk_pn->q[i].pn[tid];

		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
			ret = tmp;
	}

	return ret;
}
struct wowlan_key_data {
	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwl_wowlan_tkip_params_cmd *tkip;
	bool error, use_rsc_tsc, use_tkip, configure_keys;
	int wep_key_idx;
};
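
/*
 * Key iterator callback: translates each mac80211 key (WEP, TKIP or CCMP)
 * into the RSC/TSC counters and TKIP material the WoWLAN firmware expects,
 * and, when configure_keys is set, programs the keys into the D3 firmware
 * image.
 */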
168 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
169 struct ieee80211_vif *vif,
170 struct ieee80211_sta *sta,
171 struct ieee80211_key_conf *key,
172 void *_data)
174 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
175 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
176 struct wowlan_key_data *data = _data;
177 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
178 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
179 struct iwl_p1k_cache *rx_p1ks;
180 u8 *rx_mic_key;
181 struct ieee80211_key_seq seq;
182 u32 cur_rx_iv32 = 0;
183 u16 p1k[IWL_P1K_SIZE];
184 int ret, i;
186 switch (key->cipher) {
187 case WLAN_CIPHER_SUITE_WEP40:
188 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
189 struct {
190 struct iwl_mvm_wep_key_cmd wep_key_cmd;
191 struct iwl_mvm_wep_key wep_key;
192 } __packed wkc = {
193 .wep_key_cmd.mac_id_n_color =
194 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
195 mvmvif->color)),
196 .wep_key_cmd.num_keys = 1,
197 /* firmware sets STA_KEY_FLG_WEP_13BYTES */
198 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
199 .wep_key.key_index = key->keyidx,
200 .wep_key.key_size = key->keylen,
		};

		/*
		 * This will fail -- the key functions don't support
		 * pairwise WEP keys. However, that's better than silently
		 * failing WoWLAN. Or maybe not?
		 */
208 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
209 break;
211 memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
212 if (key->keyidx == mvmvif->tx_key_idx) {
213 /* TX key must be at offset 0 */
214 wkc.wep_key.key_offset = 0;
215 } else {
216 /* others start at 1 */
217 data->wep_key_idx++;
218 wkc.wep_key.key_offset = data->wep_key_idx;
221 if (data->configure_keys) {
222 mutex_lock(&mvm->mutex);
223 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
224 sizeof(wkc), &wkc);
225 data->error = ret != 0;
227 mvm->ptk_ivlen = key->iv_len;
228 mvm->ptk_icvlen = key->icv_len;
229 mvm->gtk_ivlen = key->iv_len;
230 mvm->gtk_icvlen = key->icv_len;
231 mutex_unlock(&mvm->mutex);
234 /* don't upload key again */
235 return;
237 default:
238 data->error = true;
239 return;
240 case WLAN_CIPHER_SUITE_AES_CMAC:
		/*
		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
		 * but we also shouldn't abort suspend due to that. It does have
		 * support for the IGTK key renewal, but doesn't really use the
		 * IGTK for anything. This means we could spuriously wake up or
		 * be deauthenticated, but that was considered acceptable.
		 */
248 return;
249 case WLAN_CIPHER_SUITE_TKIP:
250 if (sta) {
251 u64 pn64;
253 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
254 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
256 rx_p1ks = data->tkip->rx_uni;
258 pn64 = atomic64_read(&key->tx_pn);
259 tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
260 tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
262 ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
263 p1k);
264 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
266 memcpy(data->tkip->mic_keys.tx,
267 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
268 IWL_MIC_KEY_SIZE);
270 rx_mic_key = data->tkip->mic_keys.rx_unicast;
271 } else {
272 tkip_sc =
273 data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
274 rx_p1ks = data->tkip->rx_multi;
275 rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
283 for (i = 0; i < IWL_NUM_RSC; i++) {
284 ieee80211_get_key_rx_seq(key, i, &seq);
285 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
286 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
287 /* wrapping isn't allowed, AP must rekey */
288 if (seq.tkip.iv32 > cur_rx_iv32)
289 cur_rx_iv32 = seq.tkip.iv32;
292 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
293 cur_rx_iv32, p1k);
294 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
295 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
296 cur_rx_iv32 + 1, p1k);
297 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
299 memcpy(rx_mic_key,
300 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
301 IWL_MIC_KEY_SIZE);
303 data->use_tkip = true;
304 data->use_rsc_tsc = true;
305 break;
306 case WLAN_CIPHER_SUITE_CCMP:
307 if (sta) {
308 u64 pn64;
310 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
311 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
313 pn64 = atomic64_read(&key->tx_pn);
314 aes_tx_sc->pn = cpu_to_le64(pn64);
315 } else {
316 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211/our RX code use TID 0 for checking the PN.
		 */
323 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
324 struct iwl_mvm_sta *mvmsta;
325 struct iwl_mvm_key_pn *ptk_pn;
326 const u8 *pn;
328 mvmsta = iwl_mvm_sta_from_mac80211(sta);
329 ptk_pn = rcu_dereference_protected(
330 mvmsta->ptk_pn[key->keyidx],
331 lockdep_is_held(&mvm->mutex));
332 if (WARN_ON(!ptk_pn))
333 break;
335 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
336 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
337 mvm->trans->num_rx_queues);
338 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
339 ((u64)pn[4] << 8) |
340 ((u64)pn[3] << 16) |
341 ((u64)pn[2] << 24) |
342 ((u64)pn[1] << 32) |
343 ((u64)pn[0] << 40));
345 } else {
346 for (i = 0; i < IWL_NUM_RSC; i++) {
347 u8 *pn = seq.ccmp.pn;
349 ieee80211_get_key_rx_seq(key, i, &seq);
350 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
351 ((u64)pn[4] << 8) |
352 ((u64)pn[3] << 16) |
353 ((u64)pn[2] << 24) |
354 ((u64)pn[1] << 32) |
355 ((u64)pn[0] << 40));
358 data->use_rsc_tsc = true;
359 break;
362 if (data->configure_keys) {
363 mutex_lock(&mvm->mutex);
365 * The D3 firmware hardcodes the key offset 0 as the key it
366 * uses to transmit packets to the AP, i.e. the PTK.
368 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
369 mvm->ptk_ivlen = key->iv_len;
370 mvm->ptk_icvlen = key->icv_len;
371 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
372 } else {
374 * firmware only supports TSC/RSC for a single key,
375 * so if there are multiple keep overwriting them
376 * with new ones -- this relies on mac80211 doing
377 * list_add_tail().
379 mvm->gtk_ivlen = key->iv_len;
380 mvm->gtk_icvlen = key->icv_len;
381 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
383 mutex_unlock(&mvm->mutex);
384 data->error = ret != 0;
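
/*
 * Upload the user-configured wakeup patterns to the firmware. The _v1
 * variant below uses the older bitmask-only command layout; the newer
 * command wraps each pattern in an explicit type plus a union.
 */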
388 static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
389 struct cfg80211_wowlan *wowlan)
391 struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
392 struct iwl_host_cmd cmd = {
393 .id = WOWLAN_PATTERNS,
394 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
396 int i, err;
398 if (!wowlan->n_patterns)
399 return 0;
401 cmd.len[0] = struct_size(pattern_cmd, patterns, wowlan->n_patterns);
403 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
404 if (!pattern_cmd)
405 return -ENOMEM;
407 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
409 for (i = 0; i < wowlan->n_patterns; i++) {
410 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
412 memcpy(&pattern_cmd->patterns[i].mask,
413 wowlan->patterns[i].mask, mask_len);
414 memcpy(&pattern_cmd->patterns[i].pattern,
415 wowlan->patterns[i].pattern,
416 wowlan->patterns[i].pattern_len);
417 pattern_cmd->patterns[i].mask_size = mask_len;
418 pattern_cmd->patterns[i].pattern_size =
419 wowlan->patterns[i].pattern_len;
422 cmd.data[0] = pattern_cmd;
423 err = iwl_mvm_send_cmd(mvm, &cmd);
424 kfree(pattern_cmd);
425 return err;
428 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
429 struct cfg80211_wowlan *wowlan)
431 struct iwl_wowlan_patterns_cmd *pattern_cmd;
432 struct iwl_host_cmd cmd = {
433 .id = WOWLAN_PATTERNS,
434 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
436 int i, err;
438 if (!wowlan->n_patterns)
439 return 0;
441 cmd.len[0] = sizeof(*pattern_cmd) +
442 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
444 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
445 if (!pattern_cmd)
446 return -ENOMEM;
448 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
450 for (i = 0; i < wowlan->n_patterns; i++) {
451 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
453 pattern_cmd->patterns[i].pattern_type =
454 WOWLAN_PATTERN_TYPE_BITMASK;
456 memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
457 wowlan->patterns[i].mask, mask_len);
458 memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
459 wowlan->patterns[i].pattern,
460 wowlan->patterns[i].pattern_len);
461 pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
462 pattern_cmd->patterns[i].u.bitmask.pattern_size =
463 wowlan->patterns[i].pattern_len;
466 cmd.data[0] = pattern_cmd;
467 err = iwl_mvm_send_cmd(mvm, &cmd);
468 kfree(pattern_cmd);
469 return err;
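
/*
 * Re-add the PHY context, MAC context, binding, AP station and time quota
 * to the (non-unified) D3 firmware image, mirroring the runtime state of
 * the BSS interface that is kept alive across suspend, and re-initialize
 * the regulatory data if LAR is supported.
 */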
472 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
473 struct ieee80211_sta *ap_sta)
475 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
476 struct ieee80211_chanctx_conf *ctx;
477 u8 chains_static, chains_dynamic;
478 struct cfg80211_chan_def chandef;
479 int ret, i;
480 struct iwl_binding_cmd_v1 binding_cmd = {};
481 struct iwl_time_quota_cmd quota_cmd = {};
482 struct iwl_time_quota_data *quota;
483 u32 status;
485 if (WARN_ON_ONCE(iwl_mvm_is_cdb_supported(mvm)))
486 return -EINVAL;
488 /* add back the PHY */
489 if (WARN_ON(!mvmvif->phy_ctxt))
490 return -EINVAL;
492 rcu_read_lock();
493 ctx = rcu_dereference(vif->chanctx_conf);
494 if (WARN_ON(!ctx)) {
495 rcu_read_unlock();
496 return -EINVAL;
498 chandef = ctx->def;
499 chains_static = ctx->rx_chains_static;
500 chains_dynamic = ctx->rx_chains_dynamic;
501 rcu_read_unlock();
503 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
504 chains_static, chains_dynamic);
505 if (ret)
506 return ret;
508 /* add back the MAC */
509 mvmvif->uploaded = false;
511 if (WARN_ON(!vif->bss_conf.assoc))
512 return -EINVAL;
514 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
515 if (ret)
516 return ret;
518 /* add back binding - XXX refactor? */
519 binding_cmd.id_and_color =
520 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
521 mvmvif->phy_ctxt->color));
522 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
523 binding_cmd.phy =
524 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
525 mvmvif->phy_ctxt->color));
526 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
527 mvmvif->color));
528 for (i = 1; i < MAX_MACS_IN_BINDING; i++)
529 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
531 status = 0;
532 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
533 IWL_BINDING_CMD_SIZE_V1, &binding_cmd,
534 &status);
535 if (ret) {
536 IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
537 return ret;
540 if (status) {
541 IWL_ERR(mvm, "Binding command failed: %u\n", status);
542 return -EIO;
545 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
546 if (ret)
547 return ret;
548 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
550 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
551 if (ret)
552 return ret;
554 /* and some quota */
555 quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
556 quota->id_and_color =
557 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
558 mvmvif->phy_ctxt->color));
559 quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
560 quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
562 for (i = 1; i < MAX_BINDINGS; i++) {
563 quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
564 quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
567 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
568 iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
569 if (ret)
570 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
572 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
573 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
575 return 0;
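
/*
 * Query the firmware for the non-QoS sequence counter. The firmware
 * returns the next-to-use value, so 0x10 is subtracted to get the
 * last-used sequence number; a negative value indicates an error.
 */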
578 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
579 struct ieee80211_vif *vif)
581 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
582 struct iwl_nonqos_seq_query_cmd query_cmd = {
583 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
584 .mac_id_n_color =
585 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
586 mvmvif->color)),
588 struct iwl_host_cmd cmd = {
589 .id = NON_QOS_TX_COUNTER_CMD,
590 .flags = CMD_WANT_SKB,
592 int err;
593 u32 size;
595 cmd.data[0] = &query_cmd;
596 cmd.len[0] = sizeof(query_cmd);
598 err = iwl_mvm_send_cmd(mvm, &cmd);
599 if (err)
600 return err;
602 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
603 if (size < sizeof(__le16)) {
604 err = -EINVAL;
605 } else {
606 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
607 /* firmware returns next, not last-used seqno */
608 err = (u16) (err - 0x10);
611 iwl_free_resp(&cmd);
612 return err;
615 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
617 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
618 struct iwl_nonqos_seq_query_cmd query_cmd = {
619 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
620 .mac_id_n_color =
621 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
622 mvmvif->color)),
623 .value = cpu_to_le16(mvmvif->seqno),
626 /* return if called during restart, not resume from D3 */
627 if (!mvmvif->seqno_valid)
628 return;
630 mvmvif->seqno_valid = false;
632 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
633 sizeof(query_cmd), &query_cmd))
634 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
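
/*
 * Stop the runtime firmware and load the dedicated D3 (WoWLAN) image.
 * Only used when the device does not have a unified D0/D3 firmware image.
 */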
637 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
639 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
641 iwl_mvm_stop_device(mvm);
	/*
	 * Set the HW restart bit -- this is mostly true as we're
	 * going to load new firmware and reprogram that, though
	 * the reprogramming is going to be manual to avoid adding
	 * all the MACs that aren't supported.
	 * We don't have to clear up everything though because the
	 * reprogramming is manual. When we resume, we'll actually
	 * go through a proper restart sequence again to switch
	 * back to the runtime firmware image.
	 */
652 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
654 /* the fw is reset, so all the keys are cleared */
655 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	mvm->ptk_ivlen = 0;
	mvm->ptk_icvlen = 0;
	mvm->gtk_ivlen = 0;
	mvm->gtk_icvlen = 0;
662 return iwl_mvm_load_d3_fw(mvm);
665 static int
666 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
667 struct cfg80211_wowlan *wowlan,
668 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
669 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
670 struct ieee80211_sta *ap_sta)
672 int ret;
673 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
675 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
677 wowlan_config_cmd->is_11n_connection =
678 ap_sta->ht_cap.ht_supported;
679 wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
680 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
682 /* Query the last used seqno and set it */
683 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
684 if (ret < 0)
685 return ret;
687 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
689 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
691 if (wowlan->disconnect)
692 wowlan_config_cmd->wakeup_filter |=
693 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
694 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
695 if (wowlan->magic_pkt)
696 wowlan_config_cmd->wakeup_filter |=
697 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
698 if (wowlan->gtk_rekey_failure)
699 wowlan_config_cmd->wakeup_filter |=
700 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
701 if (wowlan->eap_identity_req)
702 wowlan_config_cmd->wakeup_filter |=
703 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
704 if (wowlan->four_way_handshake)
705 wowlan_config_cmd->wakeup_filter |=
706 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
707 if (wowlan->n_patterns)
708 wowlan_config_cmd->wakeup_filter |=
709 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
711 if (wowlan->rfkill_release)
712 wowlan_config_cmd->wakeup_filter |=
713 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
715 if (wowlan->tcp) {
717 * Set the "link change" (really "link lost") flag as well
718 * since that implies losing the TCP connection.
720 wowlan_config_cmd->wakeup_filter |=
721 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
722 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
723 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
724 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
727 if (wowlan->any) {
728 wowlan_config_cmd->wakeup_filter |=
729 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
730 IWL_WOWLAN_WAKEUP_LINK_CHANGE |
731 IWL_WOWLAN_WAKEUP_RX_FRAME |
732 IWL_WOWLAN_WAKEUP_BCN_FILTERING);
735 return 0;
738 static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
739 struct ieee80211_vif *vif,
740 u32 cmd_flags)
742 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
743 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
744 bool unified = fw_has_capa(&mvm->fw->ucode_capa,
745 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
746 struct wowlan_key_data key_data = {
747 .configure_keys = !unified,
748 .use_rsc_tsc = false,
749 .tkip = &tkip_cmd,
750 .use_tkip = false,
752 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
753 int ret;
755 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
756 if (!key_data.rsc_tsc)
757 return -ENOMEM;
760 * if we have to configure keys, call ieee80211_iter_keys(),
761 * as we need non-atomic context in order to take the
762 * required locks.
765 * Note that currently we don't propagate cmd_flags
766 * to the iterator. In case of key_data.configure_keys,
767 * all the configured commands are SYNC, and
768 * iwl_mvm_wowlan_program_keys() will take care of
769 * locking/unlocking mvm->mutex.
771 ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_wowlan_program_keys,
772 &key_data);
774 if (key_data.error) {
775 ret = -EIO;
776 goto out;
779 if (key_data.use_rsc_tsc) {
780 ret = iwl_mvm_send_cmd_pdu(mvm,
781 WOWLAN_TSC_RSC_PARAM, cmd_flags,
782 sizeof(*key_data.rsc_tsc),
783 key_data.rsc_tsc);
784 if (ret)
785 goto out;
788 if (key_data.use_tkip &&
789 !fw_has_api(&mvm->fw->ucode_capa,
790 IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
791 ret = iwl_mvm_send_cmd_pdu(mvm,
792 WOWLAN_TKIP_PARAM,
793 cmd_flags, sizeof(tkip_cmd),
794 &tkip_cmd);
795 if (ret)
796 goto out;
799 /* configure rekey data only if offloaded rekey is supported (d3) */
800 if (mvmvif->rekey_data.valid) {
801 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
802 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
803 NL80211_KCK_LEN);
804 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
805 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
806 NL80211_KEK_LEN);
807 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
808 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
810 ret = iwl_mvm_send_cmd_pdu(mvm,
811 WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
812 sizeof(kek_kck_cmd),
813 &kek_kck_cmd);
814 if (ret)
815 goto out;
817 ret = 0;
818 out:
819 kfree(key_data.rsc_tsc);
820 return ret;
823 static int
824 iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
825 struct cfg80211_wowlan *wowlan,
826 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
827 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
828 struct ieee80211_sta *ap_sta)
830 int ret;
831 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
832 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
834 mvm->offload_tid = wowlan_config_cmd->offloading_tid;
836 if (!unified_image) {
837 ret = iwl_mvm_switch_to_d3(mvm);
838 if (ret)
839 return ret;
841 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
842 if (ret)
843 return ret;
846 if (!iwlwifi_mod_params.swcrypto) {
848 * This needs to be unlocked due to lock ordering
849 * constraints. Since we're in the suspend path
850 * that isn't really a problem though.
852 mutex_unlock(&mvm->mutex);
853 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, CMD_ASYNC);
854 mutex_lock(&mvm->mutex);
855 if (ret)
856 return ret;
859 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
860 sizeof(*wowlan_config_cmd),
861 wowlan_config_cmd);
862 if (ret)
863 return ret;
865 if (fw_has_api(&mvm->fw->ucode_capa,
866 IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
867 ret = iwl_mvm_send_patterns(mvm, wowlan);
868 else
869 ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
870 if (ret)
871 return ret;
873 return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
876 static int
877 iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
878 struct cfg80211_wowlan *wowlan,
879 struct cfg80211_sched_scan_request *nd_config,
880 struct ieee80211_vif *vif)
882 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
883 int ret;
884 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
885 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
887 if (!unified_image) {
888 ret = iwl_mvm_switch_to_d3(mvm);
889 if (ret)
890 return ret;
891 } else {
892 /* In theory, we wouldn't have to stop a running sched
893 * scan in order to start another one (for
894 * net-detect). But in practice this doesn't seem to
895 * work properly, so stop any running sched_scan now.
897 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
898 if (ret)
899 return ret;
902 /* rfkill release can be either for wowlan or netdetect */
903 if (wowlan->rfkill_release)
904 wowlan_config_cmd.wakeup_filter |=
905 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
907 wowlan_config_cmd.sta_id = mvm->aux_sta.sta_id;
909 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
910 sizeof(wowlan_config_cmd),
911 &wowlan_config_cmd);
912 if (ret)
913 return ret;
915 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
916 IWL_MVM_SCAN_NETDETECT);
917 if (ret)
918 return ret;
920 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
921 return -EBUSY;
923 /* save the sched scan matchsets... */
924 if (nd_config->n_match_sets) {
925 mvm->nd_match_sets = kmemdup(nd_config->match_sets,
926 sizeof(*nd_config->match_sets) *
927 nd_config->n_match_sets,
928 GFP_KERNEL);
929 if (mvm->nd_match_sets)
930 mvm->n_nd_match_sets = nd_config->n_match_sets;
933 /* ...and the sched scan channels for later reporting */
934 mvm->nd_channels = kmemdup(nd_config->channels,
935 sizeof(*nd_config->channels) *
936 nd_config->n_channels,
937 GFP_KERNEL);
938 if (mvm->nd_channels)
939 mvm->n_nd_channels = nd_config->n_channels;
941 return 0;
static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
{
	kfree(mvm->nd_match_sets);
	mvm->nd_match_sets = NULL;
	mvm->n_nd_match_sets = 0;
	kfree(mvm->nd_channels);
	mvm->nd_channels = NULL;
	mvm->n_nd_channels = 0;
}
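
/*
 * Core suspend path: picks WoWLAN vs. net-detect configuration, programs
 * wakeup filters, patterns and keys, sends D3_CONFIG_CMD (which switches
 * the firmware state) and finally suspends the transport.
 */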
954 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
955 struct cfg80211_wowlan *wowlan,
956 bool test)
958 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
959 struct ieee80211_vif *vif = NULL;
960 struct iwl_mvm_vif *mvmvif = NULL;
961 struct ieee80211_sta *ap_sta = NULL;
962 struct iwl_d3_manager_config d3_cfg_cmd_data = {
964 * Program the minimum sleep time to 10 seconds, as many
965 * platforms have issues processing a wakeup signal while
966 * still being in the process of suspending.
968 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
970 struct iwl_host_cmd d3_cfg_cmd = {
971 .id = D3_CONFIG_CMD,
972 .flags = CMD_WANT_SKB,
973 .data[0] = &d3_cfg_cmd_data,
974 .len[0] = sizeof(d3_cfg_cmd_data),
976 int ret;
977 int len __maybe_unused;
978 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
979 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
981 if (!wowlan) {
983 * mac80211 shouldn't get here, but for D3 test
984 * it doesn't warrant a warning
986 WARN_ON(!test);
987 return -EINVAL;
990 mutex_lock(&mvm->mutex);
992 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
994 vif = iwl_mvm_get_bss_vif(mvm);
995 if (IS_ERR_OR_NULL(vif)) {
996 ret = 1;
997 goto out_noreset;
1000 mvmvif = iwl_mvm_vif_from_mac80211(vif);
1002 if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
1003 /* if we're not associated, this must be netdetect */
1004 if (!wowlan->nd_config) {
1005 ret = 1;
1006 goto out_noreset;
1009 ret = iwl_mvm_netdetect_config(
1010 mvm, wowlan, wowlan->nd_config, vif);
1011 if (ret)
1012 goto out;
1014 mvm->net_detect = true;
1015 } else {
1016 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1018 wowlan_config_cmd.sta_id = mvmvif->ap_sta_id;
1020 ap_sta = rcu_dereference_protected(
1021 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1022 lockdep_is_held(&mvm->mutex));
1023 if (IS_ERR_OR_NULL(ap_sta)) {
1024 ret = -EINVAL;
1025 goto out_noreset;
1028 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1029 vif, mvmvif, ap_sta);
1030 if (ret)
1031 goto out_noreset;
1032 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1033 vif, mvmvif, ap_sta);
1034 if (ret)
1035 goto out;
1037 mvm->net_detect = false;
1040 ret = iwl_mvm_power_update_device(mvm);
1041 if (ret)
1042 goto out;
1044 ret = iwl_mvm_power_update_mac(mvm);
1045 if (ret)
1046 goto out;
1048 #ifdef CONFIG_IWLWIFI_DEBUGFS
1049 if (mvm->d3_wake_sysassert)
1050 d3_cfg_cmd_data.wakeup_flags |=
1051 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1052 #endif
1055 * Prior to 9000 device family the driver needs to stop the dbg
1056 * recording before entering D3. In later devices the FW stops the
1057 * recording automatically.
1059 if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
1060 iwl_fw_dbg_stop_restart_recording(&mvm->fwrt, NULL, true);
1062 /* must be last -- this switches firmware state */
1063 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1064 if (ret)
1065 goto out;
1066 #ifdef CONFIG_IWLWIFI_DEBUGFS
1067 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
1068 if (len >= sizeof(u32)) {
1069 mvm->d3_test_pme_ptr =
1070 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1072 #endif
1073 iwl_free_resp(&d3_cfg_cmd);
1075 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1077 ret = iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
1078 out:
1079 if (ret < 0) {
1080 iwl_mvm_free_nd(mvm);
1082 if (!unified_image) {
1083 if (mvm->fw_restart > 0) {
1084 mvm->fw_restart--;
1085 ieee80211_restart_hw(mvm->hw);
1089 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
1091 out_noreset:
1092 mutex_unlock(&mvm->mutex);
1094 return ret;
1097 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1099 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1100 struct iwl_trans *trans = mvm->trans;
1101 int ret;
1103 iwl_mvm_pause_tcm(mvm, true);
1105 iwl_fw_runtime_suspend(&mvm->fwrt);
1107 ret = iwl_trans_suspend(trans);
1108 if (ret)
1109 return ret;
1111 trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
1113 return __iwl_mvm_suspend(hw, wowlan, false);
/* converted data from the different status responses */
struct iwl_wowlan_status_data {
	u16 pattern_number;
	u16 qos_seq_ctr[8];
	u32 wakeup_reasons;
	u32 wake_packet_length;
	u32 wake_packet_bufsize;
	const u8 *wake_packet;
};
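
/*
 * Translate the firmware wakeup reasons into a cfg80211_wowlan_wakeup
 * report, reconstructing the (possibly truncated) wake packet as an
 * 802.3 or 802.11 frame for userspace.
 */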
1126 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1127 struct ieee80211_vif *vif,
1128 struct iwl_wowlan_status_data *status)
1130 struct sk_buff *pkt = NULL;
1131 struct cfg80211_wowlan_wakeup wakeup = {
1132 .pattern_idx = -1,
1134 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1135 u32 reasons = status->wakeup_reasons;
1137 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1138 wakeup_report = NULL;
1139 goto report;
1142 pm_wakeup_event(mvm->dev, 0);
1144 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1145 wakeup.magic_pkt = true;
1147 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1148 wakeup.pattern_idx =
1149 status->pattern_number;
1151 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1152 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1153 wakeup.disconnect = true;
1155 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1156 wakeup.gtk_rekey_failure = true;
1158 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1159 wakeup.rfkill_release = true;
1161 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1162 wakeup.eap_identity_req = true;
1164 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1165 wakeup.four_way_handshake = true;
1167 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1168 wakeup.tcp_connlost = true;
1170 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1171 wakeup.tcp_nomoretokens = true;
1173 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1174 wakeup.tcp_match = true;
1176 if (status->wake_packet_bufsize) {
1177 int pktsize = status->wake_packet_bufsize;
1178 int pktlen = status->wake_packet_length;
1179 const u8 *pktdata = status->wake_packet;
1180 struct ieee80211_hdr *hdr = (void *)pktdata;
1181 int truncated = pktlen - pktsize;
1183 /* this would be a firmware bug */
1184 if (WARN_ON_ONCE(truncated < 0))
1185 truncated = 0;
1187 if (ieee80211_is_data(hdr->frame_control)) {
1188 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1189 int ivlen = 0, icvlen = 4; /* also FCS */
1191 pkt = alloc_skb(pktsize, GFP_KERNEL);
1192 if (!pkt)
1193 goto report;
1195 skb_put_data(pkt, pktdata, hdrlen);
1196 pktdata += hdrlen;
1197 pktsize -= hdrlen;
1199 if (ieee80211_has_protected(hdr->frame_control)) {
1201 * This is unlocked and using gtk_i(c)vlen,
1202 * but since everything is under RTNL still
1203 * that's not really a problem - changing
1204 * it would be difficult.
1206 if (is_multicast_ether_addr(hdr->addr1)) {
1207 ivlen = mvm->gtk_ivlen;
1208 icvlen += mvm->gtk_icvlen;
1209 } else {
1210 ivlen = mvm->ptk_ivlen;
1211 icvlen += mvm->ptk_icvlen;
1215 /* if truncated, FCS/ICV is (partially) gone */
1216 if (truncated >= icvlen) {
				truncated -= icvlen;
				icvlen = 0;
1219 } else {
1220 icvlen -= truncated;
1221 truncated = 0;
1224 pktsize -= ivlen + icvlen;
1225 pktdata += ivlen;
1227 skb_put_data(pkt, pktdata, pktsize);
1229 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1230 goto report;
1231 wakeup.packet = pkt->data;
1232 wakeup.packet_present_len = pkt->len;
1233 wakeup.packet_len = pkt->len - truncated;
1234 wakeup.packet_80211 = false;
1235 } else {
1236 int fcslen = 4;
1238 if (truncated >= 4) {
1239 truncated -= 4;
1240 fcslen = 0;
1241 } else {
1242 fcslen -= truncated;
1243 truncated = 0;
1245 pktsize -= fcslen;
1246 wakeup.packet = status->wake_packet;
1247 wakeup.packet_present_len = pktsize;
1248 wakeup.packet_len = pktlen - truncated;
1249 wakeup.packet_80211 = true;
1253 report:
1254 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1255 kfree_skb(pkt);
static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
				  struct ieee80211_key_seq *seq)
{
	u64 pn;

	pn = le64_to_cpu(sc->pn);
	seq->ccmp.pn[0] = pn >> 40;
	seq->ccmp.pn[1] = pn >> 32;
	seq->ccmp.pn[2] = pn >> 24;
	seq->ccmp.pn[3] = pn >> 16;
	seq->ccmp.pn[4] = pn >> 8;
	seq->ccmp.pn[5] = pn;
}

static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
				   struct ieee80211_key_seq *seq)
{
	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}
1279 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
1280 struct ieee80211_sta *sta,
1281 struct ieee80211_key_conf *key)
1283 int tid;
1285 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1287 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
1288 struct iwl_mvm_sta *mvmsta;
1289 struct iwl_mvm_key_pn *ptk_pn;
1291 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1293 ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
1294 lockdep_is_held(&mvm->mutex));
1295 if (WARN_ON(!ptk_pn))
1296 return;
1298 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1299 struct ieee80211_key_seq seq = {};
1300 int i;
1302 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1303 ieee80211_set_key_rx_seq(key, tid, &seq);
1304 for (i = 1; i < mvm->trans->num_rx_queues; i++)
1305 memcpy(ptk_pn->q[i].pn[tid],
1306 seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
1308 } else {
1309 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1310 struct ieee80211_key_seq seq = {};
1312 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1313 ieee80211_set_key_rx_seq(key, tid, &seq);
1318 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1319 struct ieee80211_key_conf *key)
1321 int tid;
1323 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1325 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1326 struct ieee80211_key_seq seq = {};
1328 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1329 ieee80211_set_key_rx_seq(key, tid, &seq);
1333 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
1334 struct ieee80211_key_conf *key,
1335 struct iwl_wowlan_status *status)
1337 union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;
1339 switch (key->cipher) {
1340 case WLAN_CIPHER_SUITE_CCMP:
1341 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
1342 break;
1343 case WLAN_CIPHER_SUITE_TKIP:
1344 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1345 break;
1346 default:
1347 WARN_ON(1);
1351 struct iwl_mvm_d3_gtk_iter_data {
1352 struct iwl_mvm *mvm;
1353 struct iwl_wowlan_status *status;
1354 void *last_gtk;
1355 u32 cipher;
1356 bool find_phase, unhandled_cipher;
1357 int num_keys;
1360 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
1361 struct ieee80211_vif *vif,
1362 struct ieee80211_sta *sta,
1363 struct ieee80211_key_conf *key,
1364 void *_data)
1366 struct iwl_mvm_d3_gtk_iter_data *data = _data;
1368 if (data->unhandled_cipher)
1369 return;
1371 switch (key->cipher) {
1372 case WLAN_CIPHER_SUITE_WEP40:
1373 case WLAN_CIPHER_SUITE_WEP104:
1374 /* ignore WEP completely, nothing to do */
1375 return;
1376 case WLAN_CIPHER_SUITE_CCMP:
1377 case WLAN_CIPHER_SUITE_TKIP:
1378 /* we support these */
1379 break;
1380 default:
1381 /* everything else (even CMAC for MFP) - disconnect from AP */
1382 data->unhandled_cipher = true;
1383 return;
1386 data->num_keys++;
1389 * pairwise key - update sequence counters only;
1390 * note that this assumes no TDLS sessions are active
1392 if (sta) {
1393 struct ieee80211_key_seq seq = {};
1394 union iwl_all_tsc_rsc *sc =
1395 &data->status->gtk[0].rsc.all_tsc_rsc;
1397 if (data->find_phase)
1398 return;
1400 switch (key->cipher) {
1401 case WLAN_CIPHER_SUITE_CCMP:
1402 iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
1403 sta, key);
1404 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1405 break;
1406 case WLAN_CIPHER_SUITE_TKIP:
1407 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1408 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1409 atomic64_set(&key->tx_pn,
1410 (u64)seq.tkip.iv16 |
1411 ((u64)seq.tkip.iv32 << 16));
1412 break;
1415 /* that's it for this key */
1416 return;
1419 if (data->find_phase) {
1420 data->last_gtk = key;
1421 data->cipher = key->cipher;
1422 return;
1425 if (data->status->num_of_gtk_rekeys)
1426 ieee80211_remove_key(key);
1427 else if (data->last_gtk == key)
1428 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
1431 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1432 struct ieee80211_vif *vif,
1433 struct iwl_wowlan_status *status)
1435 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1436 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1437 .mvm = mvm,
1438 .status = status,
1440 u32 disconnection_reasons =
1441 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1442 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1444 if (!status || !vif->bss_conf.bssid)
1445 return false;
1447 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
1448 return false;
1450 /* find last GTK that we used initially, if any */
1451 gtkdata.find_phase = true;
1452 ieee80211_iter_keys(mvm->hw, vif,
1453 iwl_mvm_d3_update_keys, &gtkdata);
1454 /* not trying to keep connections with MFP/unhandled ciphers */
1455 if (gtkdata.unhandled_cipher)
1456 return false;
1457 if (!gtkdata.num_keys)
1458 goto out;
1459 if (!gtkdata.last_gtk)
1460 return false;
1463 * invalidate all other GTKs that might still exist and update
1464 * the one that we used
1466 gtkdata.find_phase = false;
1467 ieee80211_iter_keys(mvm->hw, vif,
1468 iwl_mvm_d3_update_keys, &gtkdata);
1470 if (status->num_of_gtk_rekeys) {
1471 struct ieee80211_key_conf *key;
1472 struct {
1473 struct ieee80211_key_conf conf;
1474 u8 key[32];
1475 } conf = {
1476 .conf.cipher = gtkdata.cipher,
1477 .conf.keyidx =
1478 iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
1480 __be64 replay_ctr;
1482 switch (gtkdata.cipher) {
1483 case WLAN_CIPHER_SUITE_CCMP:
1484 conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1485 memcpy(conf.conf.key, status->gtk[0].key,
1486 WLAN_KEY_LEN_CCMP);
1487 break;
1488 case WLAN_CIPHER_SUITE_TKIP:
1489 conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1490 memcpy(conf.conf.key, status->gtk[0].key, 16);
1491 /* leave TX MIC key zeroed, we don't use it anyway */
1492 memcpy(conf.conf.key +
1493 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1494 status->gtk[0].tkip_mic_key, 8);
1495 break;
1498 key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1499 if (IS_ERR(key))
1500 return false;
1501 iwl_mvm_set_key_rx_seq(mvm, key, status);
1503 replay_ctr =
1504 cpu_to_be64(le64_to_cpu(status->replay_ctr));
1506 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1507 (void *)&replay_ctr, GFP_KERNEL);
1510 out:
1511 mvmvif->seqno_valid = true;
1512 /* +0x10 because the set API expects next-to-use, not last-used */
1513 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1515 return true;
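
/*
 * Send WOWLAN_GET_STATUSES and return the status in the current (v7)
 * layout, converting from the older v6 response when the firmware lacks
 * IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL. The caller must free the
 * returned buffer.
 */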
1518 struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
1520 struct iwl_wowlan_status *v7, *status;
1521 struct iwl_host_cmd cmd = {
1522 .id = WOWLAN_GET_STATUSES,
1523 .flags = CMD_WANT_SKB,
1525 int ret, len, status_size;
1527 lockdep_assert_held(&mvm->mutex);
1529 ret = iwl_mvm_send_cmd(mvm, &cmd);
1530 if (ret) {
1531 IWL_ERR(mvm, "failed to query wakeup status (%d)\n", ret);
1532 return ERR_PTR(ret);
1535 if (!fw_has_api(&mvm->fw->ucode_capa,
1536 IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
1537 struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
1538 int data_size;
1540 status_size = sizeof(*v6);
1541 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1543 if (len < status_size) {
1544 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1545 status = ERR_PTR(-EIO);
1546 goto out_free_resp;
1549 data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
1551 if (len != (status_size + data_size)) {
1552 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1553 status = ERR_PTR(-EIO);
1554 goto out_free_resp;
1557 status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
1558 if (!status)
1559 goto out_free_resp;
1561 BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
1562 sizeof(status->gtk[0].key));
1563 BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) >
1564 sizeof(status->gtk[0].tkip_mic_key));
1566 /* copy GTK info to the right place */
1567 memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
1568 sizeof(v6->gtk.decrypt_key));
1569 memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key,
1570 sizeof(v6->gtk.tkip_mic_key));
1571 memcpy(&status->gtk[0].rsc, &v6->gtk.rsc,
1572 sizeof(status->gtk[0].rsc));
1574 /* hardcode the key length to 16 since v6 only supports 16 */
1575 status->gtk[0].key_len = 16;
1578 * The key index only uses 2 bits (values 0 to 3) and
1579 * we always set bit 7 which means this is the
1580 * currently used key.
1582 status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
1584 status->replay_ctr = v6->replay_ctr;
1586 /* everything starting from pattern_number is identical */
1587 memcpy(&status->pattern_number, &v6->pattern_number,
1588 offsetof(struct iwl_wowlan_status, wake_packet) -
1589 offsetof(struct iwl_wowlan_status, pattern_number) +
1590 data_size);
1592 goto out_free_resp;
1595 v7 = (void *)cmd.resp_pkt->data;
1596 status_size = sizeof(*v7);
1597 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1599 if (len < status_size) {
1600 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1601 status = ERR_PTR(-EIO);
1602 goto out_free_resp;
1605 if (len != (status_size +
1606 ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4))) {
1607 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1608 status = ERR_PTR(-EIO);
1609 goto out_free_resp;
1612 status = kmemdup(v7, len, GFP_KERNEL);
1614 out_free_resp:
1615 iwl_free_resp(&cmd);
1616 return status;
1619 static struct iwl_wowlan_status *
1620 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm)
1622 int ret;
1624 /* only for tracing for now */
1625 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1626 if (ret)
1627 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1629 return iwl_mvm_send_wowlan_get_status(mvm);
1632 /* releases the MVM mutex */
1633 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1634 struct ieee80211_vif *vif)
1636 struct iwl_wowlan_status_data status;
1637 struct iwl_wowlan_status *fw_status;
1638 int i;
1639 bool keep;
1640 struct iwl_mvm_sta *mvm_ap_sta;
1642 fw_status = iwl_mvm_get_wakeup_status(mvm);
1643 if (IS_ERR_OR_NULL(fw_status))
1644 goto out_unlock;
1646 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1647 for (i = 0; i < 8; i++)
1648 status.qos_seq_ctr[i] =
1649 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1650 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1651 status.wake_packet_length =
1652 le32_to_cpu(fw_status->wake_packet_length);
1653 status.wake_packet_bufsize =
1654 le32_to_cpu(fw_status->wake_packet_bufsize);
1655 status.wake_packet = fw_status->wake_packet;
1657 /* still at hard-coded place 0 for D3 image */
1658 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
1659 if (!mvm_ap_sta)
1660 goto out_free;
1662 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1663 u16 seq = status.qos_seq_ctr[i];
1664 /* firmware stores last-used value, we store next value */
1665 seq += 0x10;
1666 mvm_ap_sta->tid_data[i].seq_number = seq;
1669 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
1670 i = mvm->offload_tid;
1671 iwl_trans_set_q_ptrs(mvm->trans,
1672 mvm_ap_sta->tid_data[i].txq_id,
1673 mvm_ap_sta->tid_data[i].seq_number >> 4);
1676 /* now we have all the data we need, unlock to avoid mac80211 issues */
1677 mutex_unlock(&mvm->mutex);
1679 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1681 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1683 kfree(fw_status);
1684 return keep;
1686 out_free:
1687 kfree(fw_status);
1688 out_unlock:
1689 mutex_unlock(&mvm->mutex);
1690 return false;
1693 #define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
1694 IWL_SCAN_MAX_PROFILES)
1696 struct iwl_mvm_nd_query_results {
1697 u32 matched_profiles;
1698 u8 matches[ND_QUERY_BUF_LEN];
1701 static int
1702 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1703 struct iwl_mvm_nd_query_results *results)
1705 struct iwl_scan_offload_profiles_query *query;
1706 struct iwl_host_cmd cmd = {
1707 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
1708 .flags = CMD_WANT_SKB,
1710 int ret, len;
1711 size_t query_len, matches_len;
1712 int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
1714 ret = iwl_mvm_send_cmd(mvm, &cmd);
1715 if (ret) {
1716 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
1717 return ret;
1720 if (fw_has_api(&mvm->fw->ucode_capa,
1721 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
1722 query_len = sizeof(struct iwl_scan_offload_profiles_query);
1723 matches_len = sizeof(struct iwl_scan_offload_profile_match) *
1724 max_profiles;
1725 } else {
1726 query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
1727 matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
1728 max_profiles;
1731 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1732 if (len < query_len) {
1733 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
1734 ret = -EIO;
1735 goto out_free_resp;
1738 query = (void *)cmd.resp_pkt->data;
1740 results->matched_profiles = le32_to_cpu(query->matched_profiles);
1741 memcpy(results->matches, query->matches, matches_len);
1743 #ifdef CONFIG_IWLWIFI_DEBUGFS
1744 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1745 #endif
1747 out_free_resp:
1748 iwl_free_resp(&cmd);
1749 return ret;
1752 static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
1753 struct iwl_mvm_nd_query_results *query,
1754 int idx)
1756 int n_chans = 0, i;
1758 if (fw_has_api(&mvm->fw->ucode_capa,
1759 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
1760 struct iwl_scan_offload_profile_match *matches =
1761 (struct iwl_scan_offload_profile_match *)query->matches;
1763 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
1764 n_chans += hweight8(matches[idx].matching_channels[i]);
1765 } else {
1766 struct iwl_scan_offload_profile_match_v1 *matches =
1767 (struct iwl_scan_offload_profile_match_v1 *)query->matches;
1769 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
1770 n_chans += hweight8(matches[idx].matching_channels[i]);
1773 return n_chans;
1776 static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
1777 struct iwl_mvm_nd_query_results *query,
1778 struct cfg80211_wowlan_nd_match *match,
1779 int idx)
1781 int i;
1783 if (fw_has_api(&mvm->fw->ucode_capa,
1784 IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
1785 struct iwl_scan_offload_profile_match *matches =
1786 (struct iwl_scan_offload_profile_match *)query->matches;
1788 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
1789 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
1790 match->channels[match->n_channels++] =
1791 mvm->nd_channels[i]->center_freq;
1792 } else {
1793 struct iwl_scan_offload_profile_match_v1 *matches =
1794 (struct iwl_scan_offload_profile_match_v1 *)query->matches;
1796 for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
1797 if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
1798 match->channels[match->n_channels++] =
1799 mvm->nd_channels[i]->center_freq;
1803 static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
1804 struct ieee80211_vif *vif)
1806 struct cfg80211_wowlan_nd_info *net_detect = NULL;
1807 struct cfg80211_wowlan_wakeup wakeup = {
1808 .pattern_idx = -1,
1810 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1811 struct iwl_mvm_nd_query_results query;
1812 struct iwl_wowlan_status *fw_status;
1813 unsigned long matched_profiles;
1814 u32 reasons = 0;
1815 int i, n_matches, ret;
1817 fw_status = iwl_mvm_get_wakeup_status(mvm);
1818 if (!IS_ERR_OR_NULL(fw_status)) {
1819 reasons = le32_to_cpu(fw_status->wakeup_reasons);
1820 kfree(fw_status);
1823 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1824 wakeup.rfkill_release = true;
1826 if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
1827 goto out;
1829 ret = iwl_mvm_netdetect_query_results(mvm, &query);
1830 if (ret || !query.matched_profiles) {
1831 wakeup_report = NULL;
1832 goto out;
1835 matched_profiles = query.matched_profiles;
1836 if (mvm->n_nd_match_sets) {
1837 n_matches = hweight_long(matched_profiles);
1838 } else {
1839 IWL_ERR(mvm, "no net detect match information available\n");
1840 n_matches = 0;
1843 net_detect = kzalloc(struct_size(net_detect, matches, n_matches),
1844 GFP_KERNEL);
1845 if (!net_detect || !n_matches)
1846 goto out_report_nd;
1848 for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
1849 struct cfg80211_wowlan_nd_match *match;
1850 int idx, n_channels = 0;
1852 n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
1854 match = kzalloc(struct_size(match, channels, n_channels),
1855 GFP_KERNEL);
1856 if (!match)
1857 goto out_report_nd;
1859 net_detect->matches[net_detect->n_matches++] = match;
1861 /* We inverted the order of the SSIDs in the scan
1862 * request, so invert the index here.
1864 idx = mvm->n_nd_match_sets - i - 1;
1865 match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
1866 memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
1867 match->ssid.ssid_len);
1869 if (mvm->n_nd_channels < n_channels)
1870 continue;
1872 iwl_mvm_query_set_freqs(mvm, &query, match, i);
1875 out_report_nd:
1876 wakeup.net_detect = net_detect;
1877 out:
1878 iwl_mvm_free_nd(mvm);
1880 mutex_unlock(&mvm->mutex);
1881 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1883 if (net_detect) {
1884 for (i = 0; i < net_detect->n_matches; i++)
1885 kfree(net_detect->matches[i]);
1886 kfree(net_detect);
static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_resume_disconnect(vif);
}
1901 static bool iwl_mvm_rt_status(struct iwl_trans *trans, u32 base, u32 *err_id)
1903 struct error_table_start {
1904 /* cf. struct iwl_error_event_table */
1905 u32 valid;
1906 __le32 err_id;
1907 } err_info;
1909 if (!base)
1910 return false;
1912 iwl_trans_read_mem_bytes(trans, base,
1913 &err_info, sizeof(err_info));
1914 if (err_info.valid && err_id)
1915 *err_id = le32_to_cpu(err_info.err_id);
1917 return !!err_info.valid;
1920 static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
1921 struct ieee80211_vif *vif)
1923 u32 err_id;
1925 /* check for lmac1 error */
1926 if (iwl_mvm_rt_status(mvm->trans,
1927 mvm->trans->dbg.lmac_error_event_table[0],
1928 &err_id)) {
1929 if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1930 struct cfg80211_wowlan_wakeup wakeup = {
1931 .rfkill_release = true,
1933 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1934 GFP_KERNEL);
1936 return true;
1939 /* check if we have lmac2 set and check for error */
1940 if (iwl_mvm_rt_status(mvm->trans,
1941 mvm->trans->dbg.lmac_error_event_table[1], NULL))
1942 return true;
1944 /* check for umac error */
1945 if (iwl_mvm_rt_status(mvm->trans,
1946 mvm->trans->dbg.umac_error_event_table, NULL))
1947 return true;
1949 return false;
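
/*
 * Core resume path: checks for firmware errors recorded during D3,
 * resumes the transport, sends D0I3_END_CMD where required and then
 * queries either the net-detect matches or the WoWLAN wakeup reasons.
 */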
1952 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
1954 struct ieee80211_vif *vif = NULL;
1955 int ret = 1;
1956 enum iwl_d3_status d3_status;
1957 bool keep = false;
1958 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1959 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1960 bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
1961 IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
1963 mutex_lock(&mvm->mutex);
1965 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
1967 /* get the BSS vif pointer again */
1968 vif = iwl_mvm_get_bss_vif(mvm);
1969 if (IS_ERR_OR_NULL(vif))
1970 goto err;
1972 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
1974 if (iwl_mvm_check_rt_status(mvm, vif)) {
1975 set_bit(STATUS_FW_ERROR, &mvm->trans->status);
1976 iwl_mvm_dump_nic_error_log(mvm);
1977 iwl_dbg_tlv_time_point(&mvm->fwrt,
1978 IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
1979 iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
1980 false, 0);
1981 ret = 1;
1982 goto err;
1985 ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
1986 if (ret)
1987 goto err;
1989 if (d3_status != IWL_D3_STATUS_ALIVE) {
1990 IWL_INFO(mvm, "Device was reset during suspend\n");
1991 goto err;
1994 if (d0i3_first) {
1995 struct iwl_host_cmd cmd = {
1996 .id = D0I3_END_CMD,
1997 .flags = CMD_WANT_SKB,
1999 int len;
2001 ret = iwl_mvm_send_cmd(mvm, &cmd);
2002 if (ret < 0) {
2003 IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
2004 ret);
2005 goto err;
2007 switch (mvm->cmd_ver.d0i3_resp) {
2008 case 0:
2009 break;
2010 case 1:
2011 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
2012 if (len != sizeof(u32)) {
2013 IWL_ERR(mvm,
2014 "Error with D0I3_END_CMD response size (%d)\n",
2015 len);
2016 goto err;
2018 if (IWL_D0I3_RESET_REQUIRE &
2019 le32_to_cpu(*(__le32 *)cmd.resp_pkt->data)) {
2020 iwl_write32(mvm->trans, CSR_RESET,
2021 CSR_RESET_REG_FLAG_FORCE_NMI);
2022 iwl_free_resp(&cmd);
2024 break;
2025 default:
2026 WARN_ON(1);
	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
2034 iwl_mvm_update_changed_regdom(mvm);
2036 /* Re-configure PPAG settings */
2037 iwl_mvm_ppag_send_cmd(mvm);
2039 if (!unified_image)
2040 /* Re-configure default SAR profile */
2041 iwl_mvm_sar_select_profile(mvm, 1, 1);
2043 if (mvm->net_detect) {
2044 /* If this is a non-unified image, we restart the FW,
2045 * so no need to stop the netdetect scan. If that
2046 * fails, continue and try to get the wake-up reasons,
2047 * but trigger a HW restart by keeping a failure code
2048 * in ret.
2050 if (unified_image)
2051 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
2052 false);
2054 iwl_mvm_query_netdetect_reasons(mvm, vif);
2055 /* has unlocked the mutex, so skip that */
2056 goto out;
2057 } else {
2058 keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
2059 #ifdef CONFIG_IWLWIFI_DEBUGFS
2060 if (keep)
2061 mvm->keep_vif = vif;
2062 #endif
2063 /* has unlocked the mutex, so skip that */
2064 goto out_iterate;
2067 err:
2068 iwl_mvm_free_nd(mvm);
2069 mutex_unlock(&mvm->mutex);
2071 out_iterate:
2072 if (!test)
2073 ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
2074 IEEE80211_IFACE_ITER_NORMAL,
2075 iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
2077 out:
2078 /* no need to reset the device in unified images, if successful */
2079 if (unified_image && !ret) {
2080 /* nothing else to do if we already sent D0I3_END_CMD */
2081 if (d0i3_first)
2082 return 0;
2084 ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
2085 if (!ret)
2086 return 0;
2090 * Reconfigure the device in one of the following cases:
2091 * 1. We are not using a unified image
2092 * 2. We are using a unified image but had an error while exiting D3
2094 set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
2096 return 1;
static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
	iwl_trans_resume(mvm->trans);

	return __iwl_mvm_resume(mvm, false);
}

int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	ret = iwl_mvm_resume_d3(mvm);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_mvm_resume_tcm(mvm);

	iwl_fw_runtime_resume(&mvm->fwrt);

	return ret;
}

void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}
2129 #ifdef CONFIG_IWLWIFI_DEBUGFS
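
/*
 * debugfs "d3_test" hooks: simulate a suspend/resume cycle (pseudo-D3)
 * without the platform actually sleeping, polling the firmware's PME
 * pointer to detect a wakeup.
 */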
2130 static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2132 struct iwl_mvm *mvm = inode->i_private;
2133 int err;
2135 if (mvm->d3_test_active)
2136 return -EBUSY;
2138 file->private_data = inode->i_private;
2140 synchronize_net();
2142 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
2144 iwl_mvm_pause_tcm(mvm, true);
2146 iwl_fw_runtime_suspend(&mvm->fwrt);
2148 /* start pseudo D3 */
2149 rtnl_lock();
2150 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
2151 rtnl_unlock();
2152 if (err > 0)
2153 err = -EINVAL;
2154 if (err)
2155 return err;
2157 mvm->d3_test_active = true;
2158 mvm->keep_vif = NULL;
2159 return 0;
2162 static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
2163 size_t count, loff_t *ppos)
2165 struct iwl_mvm *mvm = file->private_data;
2166 u32 pme_asserted;
2168 while (true) {
2169 /* read pme_ptr if available */
2170 if (mvm->d3_test_pme_ptr) {
2171 pme_asserted = iwl_trans_read_mem32(mvm->trans,
2172 mvm->d3_test_pme_ptr);
2173 if (pme_asserted)
2174 break;
2177 if (msleep_interruptible(100))
2178 break;
2181 return 0;
2184 static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
2185 struct ieee80211_vif *vif)
2187 /* skip the one we keep connection on */
2188 if (_data == vif)
2189 return;
2191 if (vif->type == NL80211_IFTYPE_STATION)
2192 ieee80211_connection_loss(vif);
2195 static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2197 struct iwl_mvm *mvm = inode->i_private;
2198 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2199 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2201 mvm->d3_test_active = false;
2203 iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
2205 rtnl_lock();
2206 __iwl_mvm_resume(mvm, true);
2207 rtnl_unlock();
2209 iwl_mvm_resume_tcm(mvm);
2211 iwl_fw_runtime_resume(&mvm->fwrt);
2213 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2215 iwl_abort_notification_waits(&mvm->notif_wait);
2216 if (!unified_image) {
2217 int remaining_time = 10;
2219 ieee80211_restart_hw(mvm->hw);
2221 /* wait for restart and disconnect all interfaces */
2222 while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2223 remaining_time > 0) {
2224 remaining_time--;
2225 msleep(1000);
2228 if (remaining_time == 0)
2229 IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
2232 ieee80211_iterate_active_interfaces_atomic(
2233 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2234 iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
2236 return 0;
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif