net/mlx4_en: Move filters cleanup to a proper location
[linux/fpc-iii.git] / drivers / net / wireless / intel / iwlwifi / mvm / d3.c
blob4fdc3dad3e85437492efc25df76b8ce8a02ad0ec
1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
6 * GPL LICENSE SUMMARY
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 * BSD LICENSE
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 Intel Deutschland GmbH
38 * All rights reserved.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
44 * * Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * * Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in
48 * the documentation and/or other materials provided with the
49 * distribution.
50 * * Neither the name Intel Corporation nor the names of its
51 * contributors may be used to endorse or promote products derived
52 * from this software without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66 *****************************************************************************/
68 #include <linux/etherdevice.h>
69 #include <linux/ip.h>
70 #include <linux/fs.h>
71 #include <net/cfg80211.h>
72 #include <net/ipv6.h>
73 #include <net/tcp.h>
74 #include <net/addrconf.h>
75 #include "iwl-modparams.h"
76 #include "fw-api.h"
77 #include "mvm.h"
79 void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
80 struct ieee80211_vif *vif,
81 struct cfg80211_gtk_rekey_data *data)
83 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
84 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
86 if (iwlwifi_mod_params.sw_crypto)
87 return;
89 mutex_lock(&mvm->mutex);
91 memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
92 memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
93 mvmvif->rekey_data.replay_ctr =
94 cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
95 mvmvif->rekey_data.valid = true;
97 mutex_unlock(&mvm->mutex);
100 #if IS_ENABLED(CONFIG_IPV6)
101 void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
102 struct ieee80211_vif *vif,
103 struct inet6_dev *idev)
105 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
106 struct inet6_ifaddr *ifa;
107 int idx = 0;
109 memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
111 read_lock_bh(&idev->lock);
112 list_for_each_entry(ifa, &idev->addr_list, if_list) {
113 mvmvif->target_ipv6_addrs[idx] = ifa->addr;
114 if (ifa->flags & IFA_F_TENTATIVE)
115 __set_bit(idx, mvmvif->tentative_addrs);
116 idx++;
117 if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
118 break;
120 read_unlock_bh(&idev->lock);
122 mvmvif->num_target_ipv6_addrs = idx;
124 #endif
126 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
127 struct ieee80211_vif *vif, int idx)
129 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
131 mvmvif->tx_key_idx = idx;
134 static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
136 int i;
138 for (i = 0; i < IWL_P1K_SIZE; i++)
139 out[i] = cpu_to_le16(p1k[i]);
142 static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
143 struct iwl_mvm_key_pn *ptk_pn,
144 struct ieee80211_key_seq *seq,
145 int tid, int queues)
147 const u8 *ret = seq->ccmp.pn;
148 int i;
150 /* get the PN from mac80211, used on the default queue */
151 ieee80211_get_key_rx_seq(key, tid, seq);
153 /* and use the internal data for the other queues */
154 for (i = 1; i < queues; i++) {
155 const u8 *tmp = ptk_pn->q[i].pn[tid];
157 if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
158 ret = tmp;
161 return ret;
164 struct wowlan_key_data {
165 struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
166 struct iwl_wowlan_tkip_params_cmd *tkip;
167 bool error, use_rsc_tsc, use_tkip, configure_keys;
168 int wep_key_idx;
171 static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
172 struct ieee80211_vif *vif,
173 struct ieee80211_sta *sta,
174 struct ieee80211_key_conf *key,
175 void *_data)
177 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
178 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
179 struct wowlan_key_data *data = _data;
180 struct aes_sc *aes_sc, *aes_tx_sc = NULL;
181 struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
182 struct iwl_p1k_cache *rx_p1ks;
183 u8 *rx_mic_key;
184 struct ieee80211_key_seq seq;
185 u32 cur_rx_iv32 = 0;
186 u16 p1k[IWL_P1K_SIZE];
187 int ret, i;
189 switch (key->cipher) {
190 case WLAN_CIPHER_SUITE_WEP40:
191 case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
192 struct {
193 struct iwl_mvm_wep_key_cmd wep_key_cmd;
194 struct iwl_mvm_wep_key wep_key;
195 } __packed wkc = {
196 .wep_key_cmd.mac_id_n_color =
197 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
198 mvmvif->color)),
199 .wep_key_cmd.num_keys = 1,
200 /* firmware sets STA_KEY_FLG_WEP_13BYTES */
201 .wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
202 .wep_key.key_index = key->keyidx,
203 .wep_key.key_size = key->keylen,
207 * This will fail -- the key functions don't set support
208 * pairwise WEP keys. However, that's better than silently
209 * failing WoWLAN. Or maybe not?
211 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
212 break;
214 memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
215 if (key->keyidx == mvmvif->tx_key_idx) {
216 /* TX key must be at offset 0 */
217 wkc.wep_key.key_offset = 0;
218 } else {
219 /* others start at 1 */
220 data->wep_key_idx++;
221 wkc.wep_key.key_offset = data->wep_key_idx;
224 if (data->configure_keys) {
225 mutex_lock(&mvm->mutex);
226 ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
227 sizeof(wkc), &wkc);
228 data->error = ret != 0;
230 mvm->ptk_ivlen = key->iv_len;
231 mvm->ptk_icvlen = key->icv_len;
232 mvm->gtk_ivlen = key->iv_len;
233 mvm->gtk_icvlen = key->icv_len;
234 mutex_unlock(&mvm->mutex);
237 /* don't upload key again */
238 return;
240 default:
241 data->error = true;
242 return;
243 case WLAN_CIPHER_SUITE_AES_CMAC:
245 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
246 * but we also shouldn't abort suspend due to that. It does have
247 * support for the IGTK key renewal, but doesn't really use the
248 * IGTK for anything. This means we could spuriously wake up or
249 * be deauthenticated, but that was considered acceptable.
251 return;
252 case WLAN_CIPHER_SUITE_TKIP:
253 if (sta) {
254 u64 pn64;
256 tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
257 tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
259 rx_p1ks = data->tkip->rx_uni;
261 pn64 = atomic64_read(&key->tx_pn);
262 tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
263 tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
265 ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
266 p1k);
267 iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
269 memcpy(data->tkip->mic_keys.tx,
270 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
271 IWL_MIC_KEY_SIZE);
273 rx_mic_key = data->tkip->mic_keys.rx_unicast;
274 } else {
275 tkip_sc =
276 data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
277 rx_p1ks = data->tkip->rx_multi;
278 rx_mic_key = data->tkip->mic_keys.rx_mcast;
282 * For non-QoS this relies on the fact that both the uCode and
283 * mac80211 use TID 0 (as they need to to avoid replay attacks)
284 * for checking the IV in the frames.
286 for (i = 0; i < IWL_NUM_RSC; i++) {
287 ieee80211_get_key_rx_seq(key, i, &seq);
288 tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
289 tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
290 /* wrapping isn't allowed, AP must rekey */
291 if (seq.tkip.iv32 > cur_rx_iv32)
292 cur_rx_iv32 = seq.tkip.iv32;
295 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
296 cur_rx_iv32, p1k);
297 iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
298 ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
299 cur_rx_iv32 + 1, p1k);
300 iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
302 memcpy(rx_mic_key,
303 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
304 IWL_MIC_KEY_SIZE);
306 data->use_tkip = true;
307 data->use_rsc_tsc = true;
308 break;
309 case WLAN_CIPHER_SUITE_CCMP:
310 if (sta) {
311 u64 pn64;
313 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
314 aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
316 pn64 = atomic64_read(&key->tx_pn);
317 aes_tx_sc->pn = cpu_to_le64(pn64);
318 } else {
319 aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
323 * For non-QoS this relies on the fact that both the uCode and
324 * mac80211/our RX code use TID 0 for checking the PN.
326 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
327 struct iwl_mvm_sta *mvmsta;
328 struct iwl_mvm_key_pn *ptk_pn;
329 const u8 *pn;
331 mvmsta = iwl_mvm_sta_from_mac80211(sta);
332 ptk_pn = rcu_dereference_protected(
333 mvmsta->ptk_pn[key->keyidx],
334 lockdep_is_held(&mvm->mutex));
335 if (WARN_ON(!ptk_pn))
336 break;
338 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
339 pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
340 mvm->trans->num_rx_queues);
341 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
342 ((u64)pn[4] << 8) |
343 ((u64)pn[3] << 16) |
344 ((u64)pn[2] << 24) |
345 ((u64)pn[1] << 32) |
346 ((u64)pn[0] << 40));
348 } else {
349 for (i = 0; i < IWL_NUM_RSC; i++) {
350 u8 *pn = seq.ccmp.pn;
352 ieee80211_get_key_rx_seq(key, i, &seq);
353 aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
354 ((u64)pn[4] << 8) |
355 ((u64)pn[3] << 16) |
356 ((u64)pn[2] << 24) |
357 ((u64)pn[1] << 32) |
358 ((u64)pn[0] << 40));
361 data->use_rsc_tsc = true;
362 break;
365 if (data->configure_keys) {
366 mutex_lock(&mvm->mutex);
368 * The D3 firmware hardcodes the key offset 0 as the key it
369 * uses to transmit packets to the AP, i.e. the PTK.
371 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
372 mvm->ptk_ivlen = key->iv_len;
373 mvm->ptk_icvlen = key->icv_len;
374 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
375 } else {
377 * firmware only supports TSC/RSC for a single key,
378 * so if there are multiple keep overwriting them
379 * with new ones -- this relies on mac80211 doing
380 * list_add_tail().
382 mvm->gtk_ivlen = key->iv_len;
383 mvm->gtk_icvlen = key->icv_len;
384 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
386 mutex_unlock(&mvm->mutex);
387 data->error = ret != 0;
391 static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
392 struct cfg80211_wowlan *wowlan)
394 struct iwl_wowlan_patterns_cmd *pattern_cmd;
395 struct iwl_host_cmd cmd = {
396 .id = WOWLAN_PATTERNS,
397 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
399 int i, err;
401 if (!wowlan->n_patterns)
402 return 0;
404 cmd.len[0] = sizeof(*pattern_cmd) +
405 wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
407 pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
408 if (!pattern_cmd)
409 return -ENOMEM;
411 pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
413 for (i = 0; i < wowlan->n_patterns; i++) {
414 int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
416 memcpy(&pattern_cmd->patterns[i].mask,
417 wowlan->patterns[i].mask, mask_len);
418 memcpy(&pattern_cmd->patterns[i].pattern,
419 wowlan->patterns[i].pattern,
420 wowlan->patterns[i].pattern_len);
421 pattern_cmd->patterns[i].mask_size = mask_len;
422 pattern_cmd->patterns[i].pattern_size =
423 wowlan->patterns[i].pattern_len;
426 cmd.data[0] = pattern_cmd;
427 err = iwl_mvm_send_cmd(mvm, &cmd);
428 kfree(pattern_cmd);
429 return err;
432 enum iwl_mvm_tcp_packet_type {
433 MVM_TCP_TX_SYN,
434 MVM_TCP_RX_SYNACK,
435 MVM_TCP_TX_DATA,
436 MVM_TCP_RX_ACK,
437 MVM_TCP_RX_WAKE,
438 MVM_TCP_TX_FIN,
441 static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
443 __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
444 return cpu_to_le16(be16_to_cpu((__force __be16)check));
447 static void iwl_mvm_build_tcp_packet(struct ieee80211_vif *vif,
448 struct cfg80211_wowlan_tcp *tcp,
449 void *_pkt, u8 *mask,
450 __le16 *pseudo_hdr_csum,
451 enum iwl_mvm_tcp_packet_type ptype)
453 struct {
454 struct ethhdr eth;
455 struct iphdr ip;
456 struct tcphdr tcp;
457 u8 data[];
458 } __packed *pkt = _pkt;
459 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
460 int i;
462 pkt->eth.h_proto = cpu_to_be16(ETH_P_IP),
463 pkt->ip.version = 4;
464 pkt->ip.ihl = 5;
465 pkt->ip.protocol = IPPROTO_TCP;
467 switch (ptype) {
468 case MVM_TCP_TX_SYN:
469 case MVM_TCP_TX_DATA:
470 case MVM_TCP_TX_FIN:
471 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
472 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
473 pkt->ip.ttl = 128;
474 pkt->ip.saddr = tcp->src;
475 pkt->ip.daddr = tcp->dst;
476 pkt->tcp.source = cpu_to_be16(tcp->src_port);
477 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
478 /* overwritten for TX SYN later */
479 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
480 pkt->tcp.window = cpu_to_be16(65000);
481 break;
482 case MVM_TCP_RX_SYNACK:
483 case MVM_TCP_RX_ACK:
484 case MVM_TCP_RX_WAKE:
485 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
486 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
487 pkt->ip.saddr = tcp->dst;
488 pkt->ip.daddr = tcp->src;
489 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
490 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
491 break;
492 default:
493 WARN_ON(1);
494 return;
497 switch (ptype) {
498 case MVM_TCP_TX_SYN:
499 /* firmware assumes 8 option bytes - 8 NOPs for now */
500 memset(pkt->data, 0x01, 8);
501 ip_tot_len += 8;
502 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
503 pkt->tcp.syn = 1;
504 break;
505 case MVM_TCP_TX_DATA:
506 ip_tot_len += tcp->payload_len;
507 memcpy(pkt->data, tcp->payload, tcp->payload_len);
508 pkt->tcp.psh = 1;
509 pkt->tcp.ack = 1;
510 break;
511 case MVM_TCP_TX_FIN:
512 pkt->tcp.fin = 1;
513 pkt->tcp.ack = 1;
514 break;
515 case MVM_TCP_RX_SYNACK:
516 pkt->tcp.syn = 1;
517 pkt->tcp.ack = 1;
518 break;
519 case MVM_TCP_RX_ACK:
520 pkt->tcp.ack = 1;
521 break;
522 case MVM_TCP_RX_WAKE:
523 ip_tot_len += tcp->wake_len;
524 pkt->tcp.psh = 1;
525 pkt->tcp.ack = 1;
526 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
527 break;
530 switch (ptype) {
531 case MVM_TCP_TX_SYN:
532 case MVM_TCP_TX_DATA:
533 case MVM_TCP_TX_FIN:
534 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
535 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
536 break;
537 case MVM_TCP_RX_WAKE:
538 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
539 u8 tmp = tcp->wake_mask[i];
540 mask[i + 6] |= tmp << 6;
541 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
542 mask[i + 7] = tmp >> 2;
544 /* fall through for ethernet/IP/TCP headers mask */
545 case MVM_TCP_RX_SYNACK:
546 case MVM_TCP_RX_ACK:
547 mask[0] = 0xff; /* match ethernet */
549 * match ethernet, ip.version, ip.ihl
550 * the ip.ihl half byte is really masked out by firmware
552 mask[1] = 0x7f;
553 mask[2] = 0x80; /* match ip.protocol */
554 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
555 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
556 mask[5] = 0x80; /* match tcp flags */
557 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
558 break;
561 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
562 pkt->ip.saddr, pkt->ip.daddr);
565 static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
566 struct ieee80211_vif *vif,
567 struct cfg80211_wowlan_tcp *tcp)
569 struct iwl_wowlan_remote_wake_config *cfg;
570 struct iwl_host_cmd cmd = {
571 .id = REMOTE_WAKE_CONFIG_CMD,
572 .len = { sizeof(*cfg), },
573 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
575 int ret;
577 if (!tcp)
578 return 0;
580 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
581 if (!cfg)
582 return -ENOMEM;
583 cmd.data[0] = cfg;
585 cfg->max_syn_retries = 10;
586 cfg->max_data_retries = 10;
587 cfg->tcp_syn_ack_timeout = 1; /* seconds */
588 cfg->tcp_ack_timeout = 1; /* seconds */
590 /* SYN (TX) */
591 iwl_mvm_build_tcp_packet(
592 vif, tcp, cfg->syn_tx.data, NULL,
593 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
594 MVM_TCP_TX_SYN);
595 cfg->syn_tx.info.tcp_payload_length = 0;
597 /* SYN/ACK (RX) */
598 iwl_mvm_build_tcp_packet(
599 vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
600 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
601 MVM_TCP_RX_SYNACK);
602 cfg->synack_rx.info.tcp_payload_length = 0;
604 /* KEEPALIVE/ACK (TX) */
605 iwl_mvm_build_tcp_packet(
606 vif, tcp, cfg->keepalive_tx.data, NULL,
607 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
608 MVM_TCP_TX_DATA);
609 cfg->keepalive_tx.info.tcp_payload_length =
610 cpu_to_le16(tcp->payload_len);
611 cfg->sequence_number_offset = tcp->payload_seq.offset;
612 /* length must be 0..4, the field is little endian */
613 cfg->sequence_number_length = tcp->payload_seq.len;
614 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
615 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
616 if (tcp->payload_tok.len) {
617 cfg->token_offset = tcp->payload_tok.offset;
618 cfg->token_length = tcp->payload_tok.len;
619 cfg->num_tokens =
620 cpu_to_le16(tcp->tokens_size % tcp->payload_tok.len);
621 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
622 tcp->tokens_size);
623 } else {
624 /* set tokens to max value to almost never run out */
625 cfg->num_tokens = cpu_to_le16(65535);
628 /* ACK (RX) */
629 iwl_mvm_build_tcp_packet(
630 vif, tcp, cfg->keepalive_ack_rx.data,
631 cfg->keepalive_ack_rx.rx_mask,
632 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
633 MVM_TCP_RX_ACK);
634 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
636 /* WAKEUP (RX) */
637 iwl_mvm_build_tcp_packet(
638 vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
639 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
640 MVM_TCP_RX_WAKE);
641 cfg->wake_rx.info.tcp_payload_length =
642 cpu_to_le16(tcp->wake_len);
644 /* FIN */
645 iwl_mvm_build_tcp_packet(
646 vif, tcp, cfg->fin_tx.data, NULL,
647 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
648 MVM_TCP_TX_FIN);
649 cfg->fin_tx.info.tcp_payload_length = 0;
651 ret = iwl_mvm_send_cmd(mvm, &cmd);
652 kfree(cfg);
654 return ret;
657 static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
658 struct ieee80211_sta *ap_sta)
660 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
661 struct ieee80211_chanctx_conf *ctx;
662 u8 chains_static, chains_dynamic;
663 struct cfg80211_chan_def chandef;
664 int ret, i;
665 struct iwl_binding_cmd binding_cmd = {};
666 struct iwl_time_quota_cmd quota_cmd = {};
667 u32 status;
669 /* add back the PHY */
670 if (WARN_ON(!mvmvif->phy_ctxt))
671 return -EINVAL;
673 rcu_read_lock();
674 ctx = rcu_dereference(vif->chanctx_conf);
675 if (WARN_ON(!ctx)) {
676 rcu_read_unlock();
677 return -EINVAL;
679 chandef = ctx->def;
680 chains_static = ctx->rx_chains_static;
681 chains_dynamic = ctx->rx_chains_dynamic;
682 rcu_read_unlock();
684 ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
685 chains_static, chains_dynamic);
686 if (ret)
687 return ret;
689 /* add back the MAC */
690 mvmvif->uploaded = false;
692 if (WARN_ON(!vif->bss_conf.assoc))
693 return -EINVAL;
695 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
696 if (ret)
697 return ret;
699 /* add back binding - XXX refactor? */
700 binding_cmd.id_and_color =
701 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
702 mvmvif->phy_ctxt->color));
703 binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
704 binding_cmd.phy =
705 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
706 mvmvif->phy_ctxt->color));
707 binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
708 mvmvif->color));
709 for (i = 1; i < MAX_MACS_IN_BINDING; i++)
710 binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
712 status = 0;
713 ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
714 sizeof(binding_cmd), &binding_cmd,
715 &status);
716 if (ret) {
717 IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
718 return ret;
721 if (status) {
722 IWL_ERR(mvm, "Binding command failed: %u\n", status);
723 return -EIO;
726 ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
727 if (ret)
728 return ret;
729 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
731 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
732 if (ret)
733 return ret;
735 /* and some quota */
736 quota_cmd.quotas[0].id_and_color =
737 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
738 mvmvif->phy_ctxt->color));
739 quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
740 quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
742 for (i = 1; i < MAX_BINDINGS; i++)
743 quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
745 ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
746 sizeof(quota_cmd), &quota_cmd);
747 if (ret)
748 IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
750 if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
751 IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
753 return 0;
756 static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
757 struct ieee80211_vif *vif)
759 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
760 struct iwl_nonqos_seq_query_cmd query_cmd = {
761 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
762 .mac_id_n_color =
763 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
764 mvmvif->color)),
766 struct iwl_host_cmd cmd = {
767 .id = NON_QOS_TX_COUNTER_CMD,
768 .flags = CMD_WANT_SKB,
770 int err;
771 u32 size;
773 cmd.data[0] = &query_cmd;
774 cmd.len[0] = sizeof(query_cmd);
776 err = iwl_mvm_send_cmd(mvm, &cmd);
777 if (err)
778 return err;
780 size = iwl_rx_packet_payload_len(cmd.resp_pkt);
781 if (size < sizeof(__le16)) {
782 err = -EINVAL;
783 } else {
784 err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
785 /* firmware returns next, not last-used seqno */
786 err = (u16) (err - 0x10);
789 iwl_free_resp(&cmd);
790 return err;
793 void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
795 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
796 struct iwl_nonqos_seq_query_cmd query_cmd = {
797 .get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
798 .mac_id_n_color =
799 cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
800 mvmvif->color)),
801 .value = cpu_to_le16(mvmvif->seqno),
804 /* return if called during restart, not resume from D3 */
805 if (!mvmvif->seqno_valid)
806 return;
808 mvmvif->seqno_valid = false;
810 if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
811 sizeof(query_cmd), &query_cmd))
812 IWL_ERR(mvm, "failed to set non-QoS seqno\n");
815 static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
817 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
819 iwl_mvm_stop_device(mvm);
821 * Set the HW restart bit -- this is mostly true as we're
822 * going to load new firmware and reprogram that, though
823 * the reprogramming is going to be manual to avoid adding
824 * all the MACs that aren't support.
825 * We don't have to clear up everything though because the
826 * reprogramming is manual. When we resume, we'll actually
827 * go through a proper restart sequence again to switch
828 * back to the runtime firmware image.
830 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
832 /* the fw is reset, so all the keys are cleared */
833 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
835 mvm->ptk_ivlen = 0;
836 mvm->ptk_icvlen = 0;
837 mvm->ptk_ivlen = 0;
838 mvm->ptk_icvlen = 0;
840 return iwl_mvm_load_d3_fw(mvm);
843 static int
844 iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
845 struct cfg80211_wowlan *wowlan,
846 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
847 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
848 struct ieee80211_sta *ap_sta)
850 int ret;
851 struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
853 /* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
855 wowlan_config_cmd->is_11n_connection =
856 ap_sta->ht_cap.ht_supported;
857 wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
858 ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
860 /* Query the last used seqno and set it */
861 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
862 if (ret < 0)
863 return ret;
865 wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
867 iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
869 if (wowlan->disconnect)
870 wowlan_config_cmd->wakeup_filter |=
871 cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
872 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
873 if (wowlan->magic_pkt)
874 wowlan_config_cmd->wakeup_filter |=
875 cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
876 if (wowlan->gtk_rekey_failure)
877 wowlan_config_cmd->wakeup_filter |=
878 cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
879 if (wowlan->eap_identity_req)
880 wowlan_config_cmd->wakeup_filter |=
881 cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
882 if (wowlan->four_way_handshake)
883 wowlan_config_cmd->wakeup_filter |=
884 cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
885 if (wowlan->n_patterns)
886 wowlan_config_cmd->wakeup_filter |=
887 cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
889 if (wowlan->rfkill_release)
890 wowlan_config_cmd->wakeup_filter |=
891 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
893 if (wowlan->tcp) {
895 * Set the "link change" (really "link lost") flag as well
896 * since that implies losing the TCP connection.
898 wowlan_config_cmd->wakeup_filter |=
899 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
900 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
901 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
902 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
905 return 0;
908 static void
909 iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
910 struct ieee80211_vif *vif,
911 void (*iter)(struct ieee80211_hw *hw,
912 struct ieee80211_vif *vif,
913 struct ieee80211_sta *sta,
914 struct ieee80211_key_conf *key,
915 void *data),
916 void *data)
918 struct ieee80211_sta *ap_sta;
920 rcu_read_lock();
922 ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
923 if (IS_ERR_OR_NULL(ap_sta))
924 goto out;
926 ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
927 out:
928 rcu_read_unlock();
931 int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
932 struct ieee80211_vif *vif,
933 bool d0i3,
934 u32 cmd_flags)
936 struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
937 struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
938 struct wowlan_key_data key_data = {
939 .configure_keys = !d0i3,
940 .use_rsc_tsc = false,
941 .tkip = &tkip_cmd,
942 .use_tkip = false,
944 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
945 int ret;
947 key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
948 if (!key_data.rsc_tsc)
949 return -ENOMEM;
952 * if we have to configure keys, call ieee80211_iter_keys(),
953 * as we need non-atomic context in order to take the
954 * required locks.
955 * for the d0i3 we can't use ieee80211_iter_keys(), as
956 * taking (almost) any mutex might result in deadlock.
958 if (!d0i3) {
960 * Note that currently we don't propagate cmd_flags
961 * to the iterator. In case of key_data.configure_keys,
962 * all the configured commands are SYNC, and
963 * iwl_mvm_wowlan_program_keys() will take care of
964 * locking/unlocking mvm->mutex.
966 ieee80211_iter_keys(mvm->hw, vif,
967 iwl_mvm_wowlan_program_keys,
968 &key_data);
969 } else {
970 iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
971 iwl_mvm_wowlan_program_keys,
972 &key_data);
975 if (key_data.error) {
976 ret = -EIO;
977 goto out;
980 if (key_data.use_rsc_tsc) {
981 ret = iwl_mvm_send_cmd_pdu(mvm,
982 WOWLAN_TSC_RSC_PARAM, cmd_flags,
983 sizeof(*key_data.rsc_tsc),
984 key_data.rsc_tsc);
985 if (ret)
986 goto out;
989 if (key_data.use_tkip) {
990 ret = iwl_mvm_send_cmd_pdu(mvm,
991 WOWLAN_TKIP_PARAM,
992 cmd_flags, sizeof(tkip_cmd),
993 &tkip_cmd);
994 if (ret)
995 goto out;
998 /* configure rekey data only if offloaded rekey is supported (d3) */
999 if (mvmvif->rekey_data.valid && !d0i3) {
1000 memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
1001 memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
1002 NL80211_KCK_LEN);
1003 kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
1004 memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
1005 NL80211_KEK_LEN);
1006 kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
1007 kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
1009 ret = iwl_mvm_send_cmd_pdu(mvm,
1010 WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
1011 sizeof(kek_kck_cmd),
1012 &kek_kck_cmd);
1013 if (ret)
1014 goto out;
1016 ret = 0;
1017 out:
1018 kfree(key_data.rsc_tsc);
1019 return ret;
1022 static int
1023 iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
1024 struct cfg80211_wowlan *wowlan,
1025 struct iwl_wowlan_config_cmd *wowlan_config_cmd,
1026 struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
1027 struct ieee80211_sta *ap_sta)
1029 int ret;
1030 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1031 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1033 if (!unified_image) {
1034 ret = iwl_mvm_switch_to_d3(mvm);
1035 if (ret)
1036 return ret;
1038 ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
1039 if (ret)
1040 return ret;
1043 if (!iwlwifi_mod_params.sw_crypto) {
1045 * This needs to be unlocked due to lock ordering
1046 * constraints. Since we're in the suspend path
1047 * that isn't really a problem though.
1049 mutex_unlock(&mvm->mutex);
1050 ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
1051 CMD_ASYNC);
1052 mutex_lock(&mvm->mutex);
1053 if (ret)
1054 return ret;
1057 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1058 sizeof(*wowlan_config_cmd),
1059 wowlan_config_cmd);
1060 if (ret)
1061 return ret;
1063 ret = iwl_mvm_send_patterns(mvm, wowlan);
1064 if (ret)
1065 return ret;
1067 ret = iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
1068 if (ret)
1069 return ret;
1071 ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
1072 return ret;
1075 static int
1076 iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
1077 struct cfg80211_wowlan *wowlan,
1078 struct cfg80211_sched_scan_request *nd_config,
1079 struct ieee80211_vif *vif)
1081 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1082 int ret;
1083 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1084 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1086 if (!unified_image) {
1087 ret = iwl_mvm_switch_to_d3(mvm);
1088 if (ret)
1089 return ret;
1092 /* rfkill release can be either for wowlan or netdetect */
1093 if (wowlan->rfkill_release)
1094 wowlan_config_cmd.wakeup_filter |=
1095 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
1097 ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
1098 sizeof(wowlan_config_cmd),
1099 &wowlan_config_cmd);
1100 if (ret)
1101 return ret;
1103 ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
1104 IWL_MVM_SCAN_NETDETECT);
1105 if (ret)
1106 return ret;
1108 if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
1109 return -EBUSY;
1111 /* save the sched scan matchsets... */
1112 if (nd_config->n_match_sets) {
1113 mvm->nd_match_sets = kmemdup(nd_config->match_sets,
1114 sizeof(*nd_config->match_sets) *
1115 nd_config->n_match_sets,
1116 GFP_KERNEL);
1117 if (mvm->nd_match_sets)
1118 mvm->n_nd_match_sets = nd_config->n_match_sets;
1121 /* ...and the sched scan channels for later reporting */
1122 mvm->nd_channels = kmemdup(nd_config->channels,
1123 sizeof(*nd_config->channels) *
1124 nd_config->n_channels,
1125 GFP_KERNEL);
1126 if (mvm->nd_channels)
1127 mvm->n_nd_channels = nd_config->n_channels;
1129 return 0;
1132 static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
1134 kfree(mvm->nd_match_sets);
1135 mvm->nd_match_sets = NULL;
1136 mvm->n_nd_match_sets = 0;
1137 kfree(mvm->nd_channels);
1138 mvm->nd_channels = NULL;
1139 mvm->n_nd_channels = 0;
1142 static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
1143 struct cfg80211_wowlan *wowlan,
1144 bool test)
1146 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1147 struct ieee80211_vif *vif = NULL;
1148 struct iwl_mvm_vif *mvmvif = NULL;
1149 struct ieee80211_sta *ap_sta = NULL;
1150 struct iwl_d3_manager_config d3_cfg_cmd_data = {
1152 * Program the minimum sleep time to 10 seconds, as many
1153 * platforms have issues processing a wakeup signal while
1154 * still being in the process of suspending.
1156 .min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
1158 struct iwl_host_cmd d3_cfg_cmd = {
1159 .id = D3_CONFIG_CMD,
1160 .flags = CMD_WANT_SKB,
1161 .data[0] = &d3_cfg_cmd_data,
1162 .len[0] = sizeof(d3_cfg_cmd_data),
1164 int ret;
1165 int len __maybe_unused;
1166 bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1167 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1169 if (!wowlan) {
1171 * mac80211 shouldn't get here, but for D3 test
1172 * it doesn't warrant a warning
1174 WARN_ON(!test);
1175 return -EINVAL;
1178 mutex_lock(&mvm->mutex);
1180 vif = iwl_mvm_get_bss_vif(mvm);
1181 if (IS_ERR_OR_NULL(vif)) {
1182 ret = 1;
1183 goto out_noreset;
1186 mvmvif = iwl_mvm_vif_from_mac80211(vif);
1188 if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) {
1189 /* if we're not associated, this must be netdetect */
1190 if (!wowlan->nd_config) {
1191 ret = 1;
1192 goto out_noreset;
1195 ret = iwl_mvm_netdetect_config(
1196 mvm, wowlan, wowlan->nd_config, vif);
1197 if (ret)
1198 goto out;
1200 mvm->net_detect = true;
1201 } else {
1202 struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
1204 ap_sta = rcu_dereference_protected(
1205 mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
1206 lockdep_is_held(&mvm->mutex));
1207 if (IS_ERR_OR_NULL(ap_sta)) {
1208 ret = -EINVAL;
1209 goto out_noreset;
1212 ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1213 vif, mvmvif, ap_sta);
1214 if (ret)
1215 goto out_noreset;
1216 ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
1217 vif, mvmvif, ap_sta);
1218 if (ret)
1219 goto out;
1221 mvm->net_detect = false;
1224 ret = iwl_mvm_power_update_device(mvm);
1225 if (ret)
1226 goto out;
1228 ret = iwl_mvm_power_update_mac(mvm);
1229 if (ret)
1230 goto out;
1232 #ifdef CONFIG_IWLWIFI_DEBUGFS
1233 if (mvm->d3_wake_sysassert)
1234 d3_cfg_cmd_data.wakeup_flags |=
1235 cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
1236 #endif
1238 /* must be last -- this switches firmware state */
1239 ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
1240 if (ret)
1241 goto out;
1242 #ifdef CONFIG_IWLWIFI_DEBUGFS
1243 len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
1244 if (len >= sizeof(u32)) {
1245 mvm->d3_test_pme_ptr =
1246 le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
1248 #endif
1249 iwl_free_resp(&d3_cfg_cmd);
1251 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1253 iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
1254 out:
1255 if (ret < 0) {
1256 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1257 ieee80211_restart_hw(mvm->hw);
1258 iwl_mvm_free_nd(mvm);
1260 out_noreset:
1261 mutex_unlock(&mvm->mutex);
1263 return ret;
1266 static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
1268 struct iwl_notification_wait wait_d3;
1269 static const u16 d3_notif[] = { D3_CONFIG_CMD };
1270 int ret;
1272 iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
1273 d3_notif, ARRAY_SIZE(d3_notif),
1274 NULL, NULL);
1276 ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
1277 if (ret)
1278 goto remove_notif;
1280 ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
1281 WARN_ON_ONCE(ret);
1282 return ret;
1284 remove_notif:
1285 iwl_remove_notification(&mvm->notif_wait, &wait_d3);
1286 return ret;
1289 int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1291 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1292 struct iwl_trans *trans = mvm->trans;
1293 int ret;
1295 /* make sure the d0i3 exit work is not pending */
1296 flush_work(&mvm->d0i3_exit_work);
1298 ret = iwl_trans_suspend(trans);
1299 if (ret)
1300 return ret;
1302 if (wowlan->any) {
1303 trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
1305 if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
1306 ret = iwl_mvm_enter_d0i3_sync(mvm);
1308 if (ret)
1309 return ret;
1312 mutex_lock(&mvm->d0i3_suspend_mutex);
1313 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
1314 mutex_unlock(&mvm->d0i3_suspend_mutex);
1316 iwl_trans_d3_suspend(trans, false, false);
1318 return 0;
1321 trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
1323 return __iwl_mvm_suspend(hw, wowlan, false);
1326 /* converted data from the different status responses */
1327 struct iwl_wowlan_status_data {
1328 u16 pattern_number;
1329 u16 qos_seq_ctr[8];
1330 u32 wakeup_reasons;
1331 u32 wake_packet_length;
1332 u32 wake_packet_bufsize;
1333 const u8 *wake_packet;
1336 static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
1337 struct ieee80211_vif *vif,
1338 struct iwl_wowlan_status_data *status)
1340 struct sk_buff *pkt = NULL;
1341 struct cfg80211_wowlan_wakeup wakeup = {
1342 .pattern_idx = -1,
1344 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
1345 u32 reasons = status->wakeup_reasons;
1347 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
1348 wakeup_report = NULL;
1349 goto report;
1352 pm_wakeup_event(mvm->dev, 0);
1354 if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
1355 wakeup.magic_pkt = true;
1357 if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
1358 wakeup.pattern_idx =
1359 status->pattern_number;
1361 if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1362 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
1363 wakeup.disconnect = true;
1365 if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
1366 wakeup.gtk_rekey_failure = true;
1368 if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
1369 wakeup.rfkill_release = true;
1371 if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
1372 wakeup.eap_identity_req = true;
1374 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
1375 wakeup.four_way_handshake = true;
1377 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1378 wakeup.tcp_connlost = true;
1380 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1381 wakeup.tcp_nomoretokens = true;
1383 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1384 wakeup.tcp_match = true;
1386 if (status->wake_packet_bufsize) {
1387 int pktsize = status->wake_packet_bufsize;
1388 int pktlen = status->wake_packet_length;
1389 const u8 *pktdata = status->wake_packet;
1390 struct ieee80211_hdr *hdr = (void *)pktdata;
1391 int truncated = pktlen - pktsize;
1393 /* this would be a firmware bug */
1394 if (WARN_ON_ONCE(truncated < 0))
1395 truncated = 0;
1397 if (ieee80211_is_data(hdr->frame_control)) {
1398 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
1399 int ivlen = 0, icvlen = 4; /* also FCS */
1401 pkt = alloc_skb(pktsize, GFP_KERNEL);
1402 if (!pkt)
1403 goto report;
1405 memcpy(skb_put(pkt, hdrlen), pktdata, hdrlen);
1406 pktdata += hdrlen;
1407 pktsize -= hdrlen;
1409 if (ieee80211_has_protected(hdr->frame_control)) {
1411 * This is unlocked and using gtk_i(c)vlen,
1412 * but since everything is under RTNL still
1413 * that's not really a problem - changing
1414 * it would be difficult.
1416 if (is_multicast_ether_addr(hdr->addr1)) {
1417 ivlen = mvm->gtk_ivlen;
1418 icvlen += mvm->gtk_icvlen;
1419 } else {
1420 ivlen = mvm->ptk_ivlen;
1421 icvlen += mvm->ptk_icvlen;
1425 /* if truncated, FCS/ICV is (partially) gone */
1426 if (truncated >= icvlen) {
1427 icvlen = 0;
1428 truncated -= icvlen;
1429 } else {
1430 icvlen -= truncated;
1431 truncated = 0;
1434 pktsize -= ivlen + icvlen;
1435 pktdata += ivlen;
1437 memcpy(skb_put(pkt, pktsize), pktdata, pktsize);
1439 if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
1440 goto report;
1441 wakeup.packet = pkt->data;
1442 wakeup.packet_present_len = pkt->len;
1443 wakeup.packet_len = pkt->len - truncated;
1444 wakeup.packet_80211 = false;
1445 } else {
1446 int fcslen = 4;
1448 if (truncated >= 4) {
1449 truncated -= 4;
1450 fcslen = 0;
1451 } else {
1452 fcslen -= truncated;
1453 truncated = 0;
1455 pktsize -= fcslen;
1456 wakeup.packet = status->wake_packet;
1457 wakeup.packet_present_len = pktsize;
1458 wakeup.packet_len = pktlen - truncated;
1459 wakeup.packet_80211 = true;
1463 report:
1464 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
1465 kfree_skb(pkt);
1468 static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
1469 struct ieee80211_key_seq *seq)
1471 u64 pn;
1473 pn = le64_to_cpu(sc->pn);
1474 seq->ccmp.pn[0] = pn >> 40;
1475 seq->ccmp.pn[1] = pn >> 32;
1476 seq->ccmp.pn[2] = pn >> 24;
1477 seq->ccmp.pn[3] = pn >> 16;
1478 seq->ccmp.pn[4] = pn >> 8;
1479 seq->ccmp.pn[5] = pn;
1482 static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
1483 struct ieee80211_key_seq *seq)
1485 seq->tkip.iv32 = le32_to_cpu(sc->iv32);
1486 seq->tkip.iv16 = le16_to_cpu(sc->iv16);
1489 static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
1490 struct ieee80211_sta *sta,
1491 struct ieee80211_key_conf *key)
1493 int tid;
1495 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1497 if (sta && iwl_mvm_has_new_rx_api(mvm)) {
1498 struct iwl_mvm_sta *mvmsta;
1499 struct iwl_mvm_key_pn *ptk_pn;
1501 mvmsta = iwl_mvm_sta_from_mac80211(sta);
1503 ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
1504 lockdep_is_held(&mvm->mutex));
1505 if (WARN_ON(!ptk_pn))
1506 return;
1508 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
1509 struct ieee80211_key_seq seq = {};
1510 int i;
1512 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1513 ieee80211_set_key_rx_seq(key, tid, &seq);
1514 for (i = 1; i < mvm->trans->num_rx_queues; i++)
1515 memcpy(ptk_pn->q[i].pn[tid],
1516 seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
1518 } else {
1519 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1520 struct ieee80211_key_seq seq = {};
1522 iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
1523 ieee80211_set_key_rx_seq(key, tid, &seq);
1528 static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
1529 struct ieee80211_key_conf *key)
1531 int tid;
1533 BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
1535 for (tid = 0; tid < IWL_NUM_RSC; tid++) {
1536 struct ieee80211_key_seq seq = {};
1538 iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
1539 ieee80211_set_key_rx_seq(key, tid, &seq);
1543 static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
1544 struct ieee80211_key_conf *key,
1545 struct iwl_wowlan_status *status)
1547 union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
1549 switch (key->cipher) {
1550 case WLAN_CIPHER_SUITE_CCMP:
1551 iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
1552 break;
1553 case WLAN_CIPHER_SUITE_TKIP:
1554 iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
1555 break;
1556 default:
1557 WARN_ON(1);
1561 struct iwl_mvm_d3_gtk_iter_data {
1562 struct iwl_mvm *mvm;
1563 struct iwl_wowlan_status *status;
1564 void *last_gtk;
1565 u32 cipher;
1566 bool find_phase, unhandled_cipher;
1567 int num_keys;
1570 static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
1571 struct ieee80211_vif *vif,
1572 struct ieee80211_sta *sta,
1573 struct ieee80211_key_conf *key,
1574 void *_data)
1576 struct iwl_mvm_d3_gtk_iter_data *data = _data;
1578 if (data->unhandled_cipher)
1579 return;
1581 switch (key->cipher) {
1582 case WLAN_CIPHER_SUITE_WEP40:
1583 case WLAN_CIPHER_SUITE_WEP104:
1584 /* ignore WEP completely, nothing to do */
1585 return;
1586 case WLAN_CIPHER_SUITE_CCMP:
1587 case WLAN_CIPHER_SUITE_TKIP:
1588 /* we support these */
1589 break;
1590 default:
1591 /* everything else (even CMAC for MFP) - disconnect from AP */
1592 data->unhandled_cipher = true;
1593 return;
1596 data->num_keys++;
1599 * pairwise key - update sequence counters only;
1600 * note that this assumes no TDLS sessions are active
1602 if (sta) {
1603 struct ieee80211_key_seq seq = {};
1604 union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
1606 if (data->find_phase)
1607 return;
1609 switch (key->cipher) {
1610 case WLAN_CIPHER_SUITE_CCMP:
1611 iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
1612 sta, key);
1613 atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
1614 break;
1615 case WLAN_CIPHER_SUITE_TKIP:
1616 iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
1617 iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
1618 atomic64_set(&key->tx_pn,
1619 (u64)seq.tkip.iv16 |
1620 ((u64)seq.tkip.iv32 << 16));
1621 break;
1624 /* that's it for this key */
1625 return;
1628 if (data->find_phase) {
1629 data->last_gtk = key;
1630 data->cipher = key->cipher;
1631 return;
1634 if (data->status->num_of_gtk_rekeys)
1635 ieee80211_remove_key(key);
1636 else if (data->last_gtk == key)
1637 iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
1640 static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
1641 struct ieee80211_vif *vif,
1642 struct iwl_wowlan_status *status)
1644 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1645 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1646 .mvm = mvm,
1647 .status = status,
1649 u32 disconnection_reasons =
1650 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
1651 IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
1653 if (!status || !vif->bss_conf.bssid)
1654 return false;
1656 if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
1657 return false;
1659 /* find last GTK that we used initially, if any */
1660 gtkdata.find_phase = true;
1661 ieee80211_iter_keys(mvm->hw, vif,
1662 iwl_mvm_d3_update_keys, &gtkdata);
1663 /* not trying to keep connections with MFP/unhandled ciphers */
1664 if (gtkdata.unhandled_cipher)
1665 return false;
1666 if (!gtkdata.num_keys)
1667 goto out;
1668 if (!gtkdata.last_gtk)
1669 return false;
1672 * invalidate all other GTKs that might still exist and update
1673 * the one that we used
1675 gtkdata.find_phase = false;
1676 ieee80211_iter_keys(mvm->hw, vif,
1677 iwl_mvm_d3_update_keys, &gtkdata);
1679 if (status->num_of_gtk_rekeys) {
1680 struct ieee80211_key_conf *key;
1681 struct {
1682 struct ieee80211_key_conf conf;
1683 u8 key[32];
1684 } conf = {
1685 .conf.cipher = gtkdata.cipher,
1686 .conf.keyidx = status->gtk.key_index,
1689 switch (gtkdata.cipher) {
1690 case WLAN_CIPHER_SUITE_CCMP:
1691 conf.conf.keylen = WLAN_KEY_LEN_CCMP;
1692 memcpy(conf.conf.key, status->gtk.decrypt_key,
1693 WLAN_KEY_LEN_CCMP);
1694 break;
1695 case WLAN_CIPHER_SUITE_TKIP:
1696 conf.conf.keylen = WLAN_KEY_LEN_TKIP;
1697 memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
1698 /* leave TX MIC key zeroed, we don't use it anyway */
1699 memcpy(conf.conf.key +
1700 NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
1701 status->gtk.tkip_mic_key, 8);
1702 break;
1705 key = ieee80211_gtk_rekey_add(vif, &conf.conf);
1706 if (IS_ERR(key))
1707 return false;
1708 iwl_mvm_set_key_rx_seq(mvm, key, status);
1711 if (status->num_of_gtk_rekeys) {
1712 __be64 replay_ctr =
1713 cpu_to_be64(le64_to_cpu(status->replay_ctr));
1714 ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
1715 (void *)&replay_ctr, GFP_KERNEL);
1718 out:
1719 mvmvif->seqno_valid = true;
1720 /* +0x10 because the set API expects next-to-use, not last-used */
1721 mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
1723 return true;
1726 static struct iwl_wowlan_status *
1727 iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
1729 u32 base = mvm->error_event_table;
1730 struct error_table_start {
1731 /* cf. struct iwl_error_event_table */
1732 u32 valid;
1733 u32 error_id;
1734 } err_info;
1735 struct iwl_host_cmd cmd = {
1736 .id = WOWLAN_GET_STATUSES,
1737 .flags = CMD_WANT_SKB,
1739 struct iwl_wowlan_status *status, *fw_status;
1740 int ret, len, status_size;
1742 iwl_trans_read_mem_bytes(mvm->trans, base,
1743 &err_info, sizeof(err_info));
1745 if (err_info.valid) {
1746 IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
1747 err_info.valid, err_info.error_id);
1748 if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
1749 struct cfg80211_wowlan_wakeup wakeup = {
1750 .rfkill_release = true,
1752 ieee80211_report_wowlan_wakeup(vif, &wakeup,
1753 GFP_KERNEL);
1755 return ERR_PTR(-EIO);
1758 /* only for tracing for now */
1759 ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
1760 if (ret)
1761 IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
1763 ret = iwl_mvm_send_cmd(mvm, &cmd);
1764 if (ret) {
1765 IWL_ERR(mvm, "failed to query status (%d)\n", ret);
1766 return ERR_PTR(ret);
1769 /* RF-kill already asserted again... */
1770 if (!cmd.resp_pkt) {
1771 fw_status = ERR_PTR(-ERFKILL);
1772 goto out_free_resp;
1775 status_size = sizeof(*fw_status);
1777 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1778 if (len < status_size) {
1779 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1780 fw_status = ERR_PTR(-EIO);
1781 goto out_free_resp;
1784 status = (void *)cmd.resp_pkt->data;
1785 if (len != (status_size +
1786 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
1787 IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
1788 fw_status = ERR_PTR(-EIO);
1789 goto out_free_resp;
1792 fw_status = kmemdup(status, len, GFP_KERNEL);
1794 out_free_resp:
1795 iwl_free_resp(&cmd);
1796 return fw_status;
1799 /* releases the MVM mutex */
1800 static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
1801 struct ieee80211_vif *vif)
1803 struct iwl_wowlan_status_data status;
1804 struct iwl_wowlan_status *fw_status;
1805 int i;
1806 bool keep;
1807 struct iwl_mvm_sta *mvm_ap_sta;
1809 fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
1810 if (IS_ERR_OR_NULL(fw_status))
1811 goto out_unlock;
1813 status.pattern_number = le16_to_cpu(fw_status->pattern_number);
1814 for (i = 0; i < 8; i++)
1815 status.qos_seq_ctr[i] =
1816 le16_to_cpu(fw_status->qos_seq_ctr[i]);
1817 status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
1818 status.wake_packet_length =
1819 le32_to_cpu(fw_status->wake_packet_length);
1820 status.wake_packet_bufsize =
1821 le32_to_cpu(fw_status->wake_packet_bufsize);
1822 status.wake_packet = fw_status->wake_packet;
1824 /* still at hard-coded place 0 for D3 image */
1825 mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
1826 if (!mvm_ap_sta)
1827 goto out_free;
1829 for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
1830 u16 seq = status.qos_seq_ctr[i];
1831 /* firmware stores last-used value, we store next value */
1832 seq += 0x10;
1833 mvm_ap_sta->tid_data[i].seq_number = seq;
1836 /* now we have all the data we need, unlock to avoid mac80211 issues */
1837 mutex_unlock(&mvm->mutex);
1839 iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
1841 keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
1843 kfree(fw_status);
1844 return keep;
1846 out_free:
1847 kfree(fw_status);
1848 out_unlock:
1849 mutex_unlock(&mvm->mutex);
1850 return false;
1853 void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
1854 struct ieee80211_vif *vif,
1855 struct iwl_wowlan_status *status)
1857 struct iwl_mvm_d3_gtk_iter_data gtkdata = {
1858 .mvm = mvm,
1859 .status = status,
1863 * rekey handling requires taking locks that can't be taken now.
1864 * however, d0i3 doesn't offload rekey, so we're fine.
1866 if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
1867 return;
1869 /* find last GTK that we used initially, if any */
1870 gtkdata.find_phase = true;
1871 iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1873 gtkdata.find_phase = false;
1874 iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
1877 struct iwl_mvm_nd_query_results {
1878 u32 matched_profiles;
1879 struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
1882 static int
1883 iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
1884 struct iwl_mvm_nd_query_results *results)
1886 struct iwl_scan_offload_profiles_query *query;
1887 struct iwl_host_cmd cmd = {
1888 .id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
1889 .flags = CMD_WANT_SKB,
1891 int ret, len;
1893 ret = iwl_mvm_send_cmd(mvm, &cmd);
1894 if (ret) {
1895 IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
1896 return ret;
1899 /* RF-kill already asserted again... */
1900 if (!cmd.resp_pkt) {
1901 ret = -ERFKILL;
1902 goto out_free_resp;
1905 len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1906 if (len < sizeof(*query)) {
1907 IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
1908 ret = -EIO;
1909 goto out_free_resp;
1912 query = (void *)cmd.resp_pkt->data;
1914 results->matched_profiles = le32_to_cpu(query->matched_profiles);
1915 memcpy(results->matches, query->matches, sizeof(results->matches));
1917 #ifdef CONFIG_IWLWIFI_DEBUGFS
1918 mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
1919 #endif
1921 out_free_resp:
1922 iwl_free_resp(&cmd);
1923 return ret;
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
					    struct ieee80211_vif *vif)
{
	struct cfg80211_wowlan_nd_info *net_detect = NULL;
	struct cfg80211_wowlan_wakeup wakeup = {
		.pattern_idx = -1,
	};
	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
	struct iwl_mvm_nd_query_results query;
	struct iwl_wowlan_status *fw_status;
	unsigned long matched_profiles;
	u32 reasons = 0;
	int i, j, n_matches, ret;

	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
	if (!IS_ERR_OR_NULL(fw_status)) {
		reasons = le32_to_cpu(fw_status->wakeup_reasons);
		kfree(fw_status);
	}

	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
		wakeup.rfkill_release = true;

	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
		goto out;

	ret = iwl_mvm_netdetect_query_results(mvm, &query);
	if (ret || !query.matched_profiles) {
		wakeup_report = NULL;
		goto out;
	}

	matched_profiles = query.matched_profiles;
	if (mvm->n_nd_match_sets) {
		n_matches = hweight_long(matched_profiles);
	} else {
		IWL_ERR(mvm, "no net detect match information available\n");
		n_matches = 0;
	}

	net_detect = kzalloc(sizeof(*net_detect) +
			     (n_matches * sizeof(net_detect->matches[0])),
			     GFP_KERNEL);
	if (!net_detect || !n_matches)
		goto out_report_nd;

	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
		struct iwl_scan_offload_profile_match *fw_match;
		struct cfg80211_wowlan_nd_match *match;
		int idx, n_channels = 0;

		fw_match = &query.matches[i];

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
			n_channels += hweight8(fw_match->matching_channels[j]);

		match = kzalloc(sizeof(*match) +
				(n_channels * sizeof(*match->channels)),
				GFP_KERNEL);
		if (!match)
			goto out_report_nd;

		net_detect->matches[net_detect->n_matches++] = match;

		/* We inverted the order of the SSIDs in the scan
		 * request, so invert the index here.
		 */
		idx = mvm->n_nd_match_sets - i - 1;
		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
		       match->ssid.ssid_len);

		if (mvm->n_nd_channels < n_channels)
			continue;

		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
			if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
				match->channels[match->n_channels++] =
					mvm->nd_channels[j]->center_freq;
	}

out_report_nd:
	wakeup.net_detect = net_detect;
out:
	iwl_mvm_free_nd(mvm);

	mutex_unlock(&mvm->mutex);
	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);

	if (net_detect) {
		for (i = 0; i < net_detect->n_matches; i++)
			kfree(net_detect->matches[i]);
		kfree(net_detect);
	}
}
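
/*
 * Debugfs-only helper: snapshot the data section of the WoWLAN (D3)
 * firmware image into mvm->d3_resume_sram so it can be inspected after
 * resume.  Only does anything when store_d3_resume_sram is set.
 */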
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
	const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
	u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
	u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;

	if (!mvm->store_d3_resume_sram)
		return;

	if (!mvm->d3_resume_sram) {
		mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
		if (!mvm->d3_resume_sram)
			return;
	}

	iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
#endif
}
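
/*
 * Interface iterator used on resume: disconnect every station
 * interface except the one (passed in @data) whose connection we
 * decided to keep.
 */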
static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
				       struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_resume_disconnect(vif);
}
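
/*
 * Common resume path, used both for a real resume and for the debugfs
 * d3_test cycle (@test).  Brings the transport out of D3, queries the
 * wakeup reasons (net-detect or regular WoWLAN) and then either ends
 * D3 with D0I3_END_CMD (unified image) or returns 1 so that mac80211
 * restarts the hardware and the D0 firmware is loaded again.
 */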
static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
{
	struct ieee80211_vif *vif = NULL;
	int ret = 1;
	enum iwl_d3_status d3_status;
	bool keep = false;
	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
		    CMD_WAKE_UP_TRANS;

	mutex_lock(&mvm->mutex);

	/* get the BSS vif pointer again */
	vif = iwl_mvm_get_bss_vif(mvm);
	if (IS_ERR_OR_NULL(vif))
		goto err;

	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
	if (ret)
		goto err;

	if (d3_status != IWL_D3_STATUS_ALIVE) {
		IWL_INFO(mvm, "Device was reset during suspend\n");
		goto err;
	}

	/* query SRAM first in case we want event logging */
	iwl_mvm_read_d3_sram(mvm);

	/*
	 * Query the current location and source from the D3 firmware so we
	 * can play it back when we re-initialize the D0 firmware
	 */
	iwl_mvm_update_changed_regdom(mvm);

	if (mvm->net_detect) {
		iwl_mvm_query_netdetect_reasons(mvm, vif);
		/* has unlocked the mutex, so skip that */
		goto out;
	} else {
		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
#ifdef CONFIG_IWLWIFI_DEBUGFS
		if (keep)
			mvm->keep_vif = vif;
#endif
		/* has unlocked the mutex, so skip that */
		goto out_iterate;
	}

err:
	iwl_mvm_free_nd(mvm);
	mutex_unlock(&mvm->mutex);

out_iterate:
	if (!test)
		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
			IEEE80211_IFACE_ITER_NORMAL,
			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);

out:
	if (unified_image && !ret) {
		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
		if (!ret) /* D3 ended successfully - no need to reset device */
			return 0;
	}

	/*
	 * Reconfigure the device in one of the following cases:
	 * 1. We are not using a unified image
	 * 2. We are using a unified image but had an error while exiting D3
	 */
	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
	/*
	 * When switching images we return 1, which causes mac80211
	 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
	 * This type of reconfig calls iwl_mvm_restart_complete(),
	 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
	 * to take the reference here.
	 */
	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);

	return 1;
}
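
/* Resume from D3: wake the transport first, then run the common resume path. */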
static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
{
	iwl_trans_resume(mvm->trans);

	return __iwl_mvm_resume(mvm, false);
}
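
/*
 * Resume when the platform suspend was done on top of D0i3: finish the
 * transport resume and run any d0i3 exit that was deferred while the
 * system was suspended.
 */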
static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
{
	bool exit_now;
	enum iwl_d3_status d3_status;
	struct iwl_trans *trans = mvm->trans;

	iwl_trans_d3_resume(trans, &d3_status, false, false);

	/*
	 * make sure to clear D0I3_DEFER_WAKEUP before
	 * calling iwl_trans_resume(), which might wait
	 * for d0i3 exit completion.
	 */
	mutex_lock(&mvm->d0i3_suspend_mutex);
	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
					&mvm->d0i3_suspend_flags);
	mutex_unlock(&mvm->d0i3_suspend_mutex);
	if (exit_now) {
		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
		_iwl_mvm_exit_d0i3(mvm);
	}

	iwl_trans_resume(trans);

	if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);

		if (ret)
			return ret;
		/*
		 * d0i3 exit will be deferred until reconfig_complete,
		 * so make sure we are out of d0i3 by then.
		 */
	}
	return 0;
}
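
/* mac80211 resume callback: dispatch to the D0i3 or D3 resume path. */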
int iwl_mvm_resume(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
		ret = iwl_mvm_resume_d0i3(mvm);
	else
		ret = iwl_mvm_resume_d3(mvm);

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	return ret;
}
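
/*
 * mac80211 set_wakeup callback: forward the wakeup enable setting to
 * the underlying device.
 */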
void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	device_set_wakeup_enable(mvm->trans->dev, enabled);
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
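/*
 * debugfs d3_test: opening the file starts a "pseudo D3" cycle without
 * the platform actually suspending - the queues are stopped and the
 * regular suspend path is entered with test == true.
 */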
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int err;

	if (mvm->d3_test_active)
		return -EBUSY;

	file->private_data = inode->i_private;

	ieee80211_stop_queues(mvm->hw);
	synchronize_net();

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;

	/* start pseudo D3 */
	rtnl_lock();
	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;
	if (err) {
		ieee80211_wake_queues(mvm->hw);
		return err;
	}

	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
}
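
/*
 * debugfs d3_test: reading blocks until the firmware asserts the PME
 * indication (polled every 100ms through d3_test_pme_ptr) or until the
 * reader is interrupted by a signal.
 */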
static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct iwl_mvm *mvm = file->private_data;
	u32 pme_asserted;

	while (true) {
		/* read pme_ptr if available */
		if (mvm->d3_test_pme_ptr) {
			pme_asserted = iwl_trans_read_mem32(mvm->trans,
						mvm->d3_test_pme_ptr);
			if (pme_asserted)
				break;
		}

		if (msleep_interruptible(100))
			break;
	}

	return 0;
}
static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
					      struct ieee80211_vif *vif)
{
	/* skip the one we keep connection on */
	if (_data == vif)
		return;

	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_connection_loss(vif);
}
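
/*
 * debugfs d3_test: releasing the file ends the pseudo D3 cycle - resume
 * with test == true, restart the hardware, wait up to 10 seconds for
 * the restart to finish and then signal connection loss on all
 * interfaces we did not keep.
 */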
static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
{
	struct iwl_mvm *mvm = inode->i_private;
	int remaining_time = 10;

	mvm->d3_test_active = false;

	rtnl_lock();
	__iwl_mvm_resume(mvm, true);
	rtnl_unlock();

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;

	iwl_abort_notification_waits(&mvm->notif_wait);
	ieee80211_restart_hw(mvm->hw);

	/* wait for restart and disconnect all interfaces */
	while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
	       remaining_time > 0) {
		remaining_time--;
		msleep(1000);
	}

	if (remaining_time == 0)
		IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n");

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	ieee80211_wake_queues(mvm->hw);

	return 0;
}
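
/* File operations backing the d3_test debugfs entry. */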
const struct file_operations iwl_dbgfs_d3_test_ops = {
	.llseek = no_llseek,
	.open = iwl_mvm_d3_test_open,
	.read = iwl_mvm_d3_test_read,
	.release = iwl_mvm_d3_test_release,
};
#endif