drivers/net/wireless/ath/ath11k/wow.c

// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"
#include "dp_rx.h"

static const struct wiphy_wowlan_support ath11k_wowlan_support = {
        .flags = WIPHY_WOWLAN_DISCONNECT |
                 WIPHY_WOWLAN_MAGIC_PKT |
                 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
                 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
        .pattern_min_len = WOW_MIN_PATTERN_SIZE,
        .pattern_max_len = WOW_MAX_PATTERN_SIZE,
        .max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

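/* Tell the firmware to enter WoW: issue the WMI wow enable command and wait
 * for the HTC suspend-complete indication, retrying up to
 * ATH11K_WOW_RETRY_NUM times before giving up with -ETIMEDOUT.
 */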
int ath11k_wow_enable(struct ath11k_base *ab)
{
        struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
        int i, ret;

        clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

        for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
                reinit_completion(&ab->htc_suspend);

                ret = ath11k_wmi_wow_enable(ar);
                if (ret) {
                        ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
                        return ret;
                }

                ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
                if (ret == 0) {
                        ath11k_warn(ab,
                                    "timed out while waiting for htc suspend completion\n");
                        return -ETIMEDOUT;
                }

                if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
                        /* success, suspend complete received */
                        return 0;

                ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
                            i);
                msleep(ATH11K_WOW_RETRY_WAIT_MS);
        }

        ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

        return -ETIMEDOUT;
}

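/* Bring the firmware back out of WoW. WCN6750 is woken through the SMP2P
 * power save exit message instead, so nothing is needed here for it;
 * otherwise send the host wakeup indication and wait for the wakeup event.
 */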
int ath11k_wow_wakeup(struct ath11k_base *ab)
{
        struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
        int ret;

        /* In the case of WCN6750, WoW wakeup is done
         * by sending SMP2P power save exit message
         * to the target processor.
         */
        if (ab->hw_params.smp2p_wow_exit)
                return 0;

        reinit_completion(&ab->wow.wakeup_completed);

        ret = ath11k_wmi_wow_host_wakeup_ind(ar);
        if (ret) {
                ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
                            ret);
                return ret;
        }

        ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
        if (ret == 0) {
                ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
                return -ETIMEDOUT;
        }

        return 0;
}

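/* Clear all wakeup events and installed patterns on one vdev so the wakeup
 * configuration can be rebuilt from scratch for this suspend cycle.
 */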
static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
{
        struct ath11k *ar = arvif->ar;
        int i, ret;

        for (i = 0; i < WOW_EVENT_MAX; i++) {
                ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
                                    wow_wakeup_event(i), arvif->vdev_id, ret);
                        return ret;
                }
        }

        for (i = 0; i < ar->wow.max_num_patterns; i++) {
                ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
                                    i, arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

static int ath11k_wow_cleanup(struct ath11k *ar)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath11k_wow_vif_cleanup(arvif);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

/* Convert a 802.3 format to a 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______    |____________  |________
 *                   |                |                |          |
 *         +--+------------+----+-----------+----+--------+----------------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)|     body...    |
 *         +--+------------+----+-----------+----+--------+----------------+
 */
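/* In this layout sizeof(struct ieee80211_hdr_3addr) is 24 bytes and the
 * RFC 1042 (LLC/SNAP) header adds another 8, so for example a pattern byte
 * at the EtherType field (802.3 offset 12) ends up at offset
 * 12 + 24 + 8 - 14 = 30 in the converted pattern, a destination MAC byte
 * (offset 0) maps to offset 4 (addr1), and a source MAC byte (offset 6)
 * maps to offset 16 (addr3). The conversion below applies exactly this
 * remapping to both the pattern and its mask.
 */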
static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
                                             const struct cfg80211_pkt_pattern *old)
{
        u8 hdr_8023_pattern[ETH_HLEN] = {};
        u8 hdr_8023_bit_mask[ETH_HLEN] = {};
        u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
        u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
        u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};

        int total_len = old->pkt_offset + old->pattern_len;
        int hdr_80211_end_offset;

        struct ieee80211_hdr_3addr *new_hdr_pattern =
                (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
        struct ieee80211_hdr_3addr *new_hdr_mask =
                (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
        struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
        struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
        int hdr_len = sizeof(*new_hdr_pattern);

        struct rfc1042_hdr *new_rfc_pattern =
                (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
        struct rfc1042_hdr *new_rfc_mask =
                (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
        int rfc_len = sizeof(*new_rfc_pattern);
        int i;

        /* convert bitmask to bytemask */
        for (i = 0; i < old->pattern_len; i++)
                if (old->mask[i / 8] & BIT(i % 8))
                        bytemask[i] = 0xff;

        memcpy(hdr_8023_pattern + old->pkt_offset,
               old->pattern, ETH_HLEN - old->pkt_offset);
        memcpy(hdr_8023_bit_mask + old->pkt_offset,
               bytemask, ETH_HLEN - old->pkt_offset);

        /* Copy destination address */
        memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
        memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

        /* Copy source address */
        memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
        memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

        /* Copy logic link type */
        memcpy(&new_rfc_pattern->snap_type,
               &old_hdr_pattern->h_proto,
               sizeof(old_hdr_pattern->h_proto));
        memcpy(&new_rfc_mask->snap_type,
               &old_hdr_mask->h_proto,
               sizeof(old_hdr_mask->h_proto));

        /* Compute new pkt_offset */
        if (old->pkt_offset < ETH_ALEN)
                new->pkt_offset = old->pkt_offset +
                        offsetof(struct ieee80211_hdr_3addr, addr1);
        else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
                new->pkt_offset = old->pkt_offset +
                        offsetof(struct ieee80211_hdr_3addr, addr3) -
                        offsetof(struct ethhdr, h_source);
        else
                new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

        /* Compute new hdr end offset */
        if (total_len > ETH_HLEN)
                hdr_80211_end_offset = hdr_len + rfc_len;
        else if (total_len > offsetof(struct ethhdr, h_proto))
                hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
        else if (total_len > ETH_ALEN)
                hdr_80211_end_offset = total_len - ETH_ALEN +
                        offsetof(struct ieee80211_hdr_3addr, addr3);
        else
                hdr_80211_end_offset = total_len +
                        offsetof(struct ieee80211_hdr_3addr, addr1);

        new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

        memcpy((u8 *)new->pattern,
               hdr_80211_pattern + new->pkt_offset,
               new->pattern_len);
        memcpy((u8 *)new->mask,
               hdr_80211_bit_mask + new->pkt_offset,
               new->pattern_len);

        if (total_len > ETH_HLEN) {
                /* Copy frame body */
                memcpy((u8 *)new->pattern + new->pattern_len,
                       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
                       total_len - ETH_HLEN);
                memcpy((u8 *)new->mask + new->pattern_len,
                       bytemask + ETH_HLEN - old->pkt_offset,
                       total_len - ETH_HLEN);

                new->pattern_len += total_len - ETH_HLEN;
        }
}

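/* Translate a cfg80211 scheduled-scan (net-detect) request into the
 * firmware's PNO scan request: copy the match-set SSIDs and channel list,
 * mark SSIDs that also appear in the probed-SSID list as hidden networks,
 * and derive the fast/slow scan periods from the scan plans.
 */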
static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
                                            struct cfg80211_sched_scan_request *nd_config,
                                            struct wmi_pno_scan_req *pno)
{
        int i, j;
        u8 ssid_len;

        pno->enable = 1;
        pno->vdev_id = vdev_id;
        pno->uc_networks_count = nd_config->n_match_sets;

        if (!pno->uc_networks_count ||
            pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
                return -EINVAL;

        if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
                return -EINVAL;

        /* Filling per profile params */
        for (i = 0; i < pno->uc_networks_count; i++) {
                ssid_len = nd_config->match_sets[i].ssid.ssid_len;

                if (ssid_len == 0 || ssid_len > 32)
                        return -EINVAL;

                pno->a_networks[i].ssid.ssid_len = ssid_len;

                memcpy(pno->a_networks[i].ssid.ssid,
                       nd_config->match_sets[i].ssid.ssid,
                       nd_config->match_sets[i].ssid.ssid_len);
                pno->a_networks[i].authentication = 0;
                pno->a_networks[i].encryption = 0;
                pno->a_networks[i].bcast_nw_type = 0;

                /* Copying list of valid channel into request */
                pno->a_networks[i].channel_count = nd_config->n_channels;
                pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

                for (j = 0; j < nd_config->n_channels; j++) {
                        pno->a_networks[i].channels[j] =
                                nd_config->channels[j]->center_freq;
                }
        }

        /* set scan to passive if no SSIDs are specified in the request */
        if (nd_config->n_ssids == 0)
                pno->do_passive_scan = true;
        else
                pno->do_passive_scan = false;

        for (i = 0; i < nd_config->n_ssids; i++) {
                j = 0;
                while (j < pno->uc_networks_count) {
                        if (pno->a_networks[j].ssid.ssid_len ==
                            nd_config->ssids[i].ssid_len &&
                            (memcmp(pno->a_networks[j].ssid.ssid,
                                    nd_config->ssids[i].ssid,
                                    pno->a_networks[j].ssid.ssid_len) == 0)) {
                                pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
                                break;
                        }
                        j++;
                }
        }

        if (nd_config->n_scan_plans == 2) {
                pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
                pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
                pno->slow_scan_period =
                        nd_config->scan_plans[1].interval * MSEC_PER_SEC;
        } else if (nd_config->n_scan_plans == 1) {
                pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
                pno->fast_scan_max_cycles = 1;
                pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
        } else {
                ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
                            nd_config->n_scan_plans);
        }

        if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
                /* enable mac randomization */
                pno->enable_pno_scan_randomization = 1;
                memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
                memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
        }

        pno->delay_start_time = nd_config->delay;

        /* Current FW does not support min-max range for dwell time */
        pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
        pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

        return 0;
}

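/* Program the per-vdev wakeup triggers requested through cfg80211: wakeup
 * events selected by vdev type and the wowlan flags, net-detect (PNO) for
 * station vdevs, and any packet patterns (converted to the 802.11 layout
 * when the firmware runs in native wifi rx decap mode).
 */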
static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
                                      struct cfg80211_wowlan *wowlan)
{
        int ret, i;
        unsigned long wow_mask = 0;
        struct ath11k *ar = arvif->ar;
        const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
        int pattern_id = 0;

        /* Setup requested WOW features */
        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_IBSS:
                __set_bit(WOW_BEACON_EVENT, &wow_mask);
                fallthrough;
        case WMI_VDEV_TYPE_AP:
                __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
                __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
                __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
                __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
                __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
                __set_bit(WOW_HTT_EVENT, &wow_mask);
                __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
                break;
        case WMI_VDEV_TYPE_STA:
                if (wowlan->disconnect) {
                        __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
                        __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
                        __set_bit(WOW_BMISS_EVENT, &wow_mask);
                        __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
                }

                if (wowlan->magic_pkt)
                        __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

                if (wowlan->nd_config) {
                        struct wmi_pno_scan_req *pno;
                        int ret;

                        pno = kzalloc(sizeof(*pno), GFP_KERNEL);
                        if (!pno)
                                return -ENOMEM;

                        ar->nlo_enabled = true;

                        ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
                                                               wowlan->nd_config, pno);
                        if (!ret) {
                                ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
                                __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
                        }

                        kfree(pno);
                }
                break;
        default:
                break;
        }

        for (i = 0; i < wowlan->n_patterns; i++) {
                u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
                u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
                struct cfg80211_pkt_pattern new_pattern = {};

                new_pattern.pattern = ath_pattern;
                new_pattern.mask = ath_bitmask;
                if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
                        continue;

                if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
                    ATH11K_HW_TXRX_NATIVE_WIFI) {
                        if (patterns[i].pkt_offset < ETH_HLEN) {
                                ath11k_wow_convert_8023_to_80211(&new_pattern,
                                                                 &patterns[i]);
                        } else {
                                int j;

                                new_pattern = patterns[i];
                                new_pattern.mask = ath_bitmask;

                                /* convert bitmask to bytemask */
                                for (j = 0; j < patterns[i].pattern_len; j++)
                                        if (patterns[i].mask[j / 8] & BIT(j % 8))
                                                ath_bitmask[j] = 0xff;

                                new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
                        }
                }

                if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
                        return -EINVAL;

                ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
                                                 pattern_id,
                                                 new_pattern.pattern,
                                                 new_pattern.mask,
                                                 new_pattern.pattern_len,
                                                 new_pattern.pkt_offset);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
                                    pattern_id,
                                    arvif->vdev_id, ret);
                        return ret;
                }

                pattern_id++;
                __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
        }

        for (i = 0; i < WOW_EVENT_MAX; i++) {
                if (!test_bit(i, &wow_mask))
                        continue;
                ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
                                    wow_wakeup_event(i), arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

static int ath11k_wow_set_wakeups(struct ath11k *ar,
                                  struct cfg80211_wowlan *wowlan)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

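/* On resume, net-detect (NLO) state left in the firmware is torn down by
 * sending a PNO request with enable = 0 for every station vdev that had it
 * enabled during suspend.
 */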
static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
{
        int ret = 0;
        struct ath11k *ar = arvif->ar;

        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_STA:
                if (ar->nlo_enabled) {
                        struct wmi_pno_scan_req *pno;

                        pno = kzalloc(sizeof(*pno), GFP_KERNEL);
                        if (!pno)
                                return -ENOMEM;

                        pno->enable = 0;
                        ar->nlo_enabled = false;
                        ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
                        kfree(pno);
                }
                break;
        default:
                break;
        }

        return ret;
}

static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath11k_vif_wow_clean_nlo(arvif);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

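/* While suspended, ask the firmware to drop non-ARP broadcast and
 * non-ICMPv6 multicast frames so only traffic relevant for wakeup and
 * offloads reaches the host.
 */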
static int ath11k_wow_set_hw_filter(struct ath11k *ar)
{
        struct ath11k_vif *arvif;
        u32 bitmap;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
                         WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
                ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
                                                    bitmap,
                                                    true);
                if (ret) {
                        ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);

                if (ret) {
                        ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
                                    arvif->vdev_id, ret);
                        return ret;
                }
        }

        return 0;
}

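/* Hand ARP and NS (IPv6 neighbour solicitation) answering over to the
 * firmware for station vdevs while suspended, and take it back on resume.
 */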
static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
                        continue;

                ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);

                if (ret) {
                        ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
                                    arvif->vdev_id, enable, ret);
                        return ret;
                }
        }

        return 0;
}

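/* Enable or disable GTK rekey offload for associated station vdevs that have
 * rekey data programmed; before disabling, fetch the current rekey/replay
 * information back from the firmware.
 */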
static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
                    !arvif->is_up ||
                    !arvif->rekey_data.enable_offload)
                        continue;

                /* get rekey info before disable rekey offload */
                if (!enable) {
                        ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
                        if (ret) {
                                ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
                                            arvif->vdev_id, ret);
                                return ret;
                        }
                }

                ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);

                if (ret) {
                        ath11k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
                                    arvif->vdev_id, enable, ret);
                        return ret;
                }
        }

        return 0;
}

static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
{
        int ret;

        ret = ath11k_wow_arp_ns_offload(ar, enable);
        if (ret) {
                ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
                            enable, ret);
                return ret;
        }

        ret = ath11k_gtk_rekey_offload(ar, enable);
        if (ret) {
                ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
                            enable, ret);
                return ret;
        }

        return 0;
}

static int ath11k_wow_set_keepalive(struct ath11k *ar,
                                    enum wmi_sta_keepalive_method method,
                                    u32 interval)
{
        struct ath11k_vif *arvif;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
                if (ret)
                        return ret;
        }

        return 0;
}

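/* mac80211 suspend handler: drain tx, stop rx pktlog, reprogram wakeup
 * events, patterns, offloads, hw filters and keepalive, enable WoW in the
 * firmware and finally suspend the HIF. Failures after wakeup state has been
 * programmed unwind through the wakeup/cleanup labels, and a non-zero result
 * is reported to mac80211 as 1.
 */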
int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
                          struct cfg80211_wowlan *wowlan)
{
        struct ath11k *ar = hw->priv;
        int ret;

        ret = ath11k_mac_wait_tx_complete(ar);
        if (ret) {
                ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
                return ret;
        }

        mutex_lock(&ar->conf_mutex);

        ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
                            ret);
                goto exit;
        }

        ret = ath11k_wow_cleanup(ar);
        if (ret) {
                ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
                            ret);
                goto exit;
        }

        ret = ath11k_wow_set_wakeups(ar, wowlan);
        if (ret) {
                ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
                            ret);
                goto cleanup;
        }

        ret = ath11k_wow_protocol_offload(ar, true);
        if (ret) {
                ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
                            ret);
                goto cleanup;
        }

        ret = ath11k_wow_set_hw_filter(ar);
        if (ret) {
                ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
                            ret);
                goto cleanup;
        }

        ret = ath11k_wow_set_keepalive(ar,
                                       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
                                       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
        if (ret) {
                ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
                goto cleanup;
        }

        ret = ath11k_wow_enable(ar->ab);
        if (ret) {
                ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
                goto cleanup;
        }

        ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
        if (ret) {
                ath11k_warn(ar->ab,
                            "failed to stop dp rx pktlog during wow suspend: %d\n",
                            ret);
                goto cleanup;
        }

        ath11k_ce_stop_shadow_timers(ar->ab);
        ath11k_dp_stop_shadow_timers(ar->ab);

        ath11k_hif_irq_disable(ar->ab);
        ath11k_hif_ce_irq_disable(ar->ab);

        ret = ath11k_hif_suspend(ar->ab);
        if (ret) {
                ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
                goto wakeup;
        }

        goto exit;

wakeup:
        ath11k_wow_wakeup(ar->ab);

cleanup:
        ath11k_wow_cleanup(ar);

exit:
        mutex_unlock(&ar->conf_mutex);
        return ret ? 1 : 0;
}

void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
        struct ath11k *ar = hw->priv;

        mutex_lock(&ar->conf_mutex);
        device_set_wakeup_enable(ar->ab->dev, enabled);
        mutex_unlock(&ar->conf_mutex);
}

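/* mac80211 resume handler: resume the HIF and re-enable interrupts, restart
 * rx pktlog, wake the firmware out of WoW and undo the suspend-time state
 * (net-detect, hw filters, offloads, keepalive). On failure the device is
 * moved to ATH11K_STATE_RESTARTING so mac80211 restarts it, or -EIO is
 * returned when no recovery is possible.
 */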
int ath11k_wow_op_resume(struct ieee80211_hw *hw)
{
        struct ath11k *ar = hw->priv;
        int ret;

        mutex_lock(&ar->conf_mutex);

        ret = ath11k_hif_resume(ar->ab);
        if (ret) {
                ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
                goto exit;
        }

        ath11k_hif_ce_irq_enable(ar->ab);
        ath11k_hif_irq_enable(ar->ab);

        ret = ath11k_dp_rx_pktlog_start(ar->ab);
        if (ret) {
                ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
                goto exit;
        }

        ret = ath11k_wow_wakeup(ar->ab);
        if (ret) {
                ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
                goto exit;
        }

        ret = ath11k_wow_nlo_cleanup(ar);
        if (ret) {
                ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
                goto exit;
        }

        ret = ath11k_wow_clear_hw_filter(ar);
        if (ret) {
                ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
                goto exit;
        }

        ret = ath11k_wow_protocol_offload(ar, false);
        if (ret) {
                ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
                            ret);
                goto exit;
        }

        ret = ath11k_wow_set_keepalive(ar,
                                       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
                                       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
        if (ret) {
                ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
                goto exit;
        }

exit:
        if (ret) {
                switch (ar->state) {
                case ATH11K_STATE_ON:
                        ar->state = ATH11K_STATE_RESTARTING;
                        ret = 1;
                        break;
                case ATH11K_STATE_OFF:
                case ATH11K_STATE_RESTARTING:
                case ATH11K_STATE_RESTARTED:
                case ATH11K_STATE_WEDGED:
                case ATH11K_STATE_FTM:
                        ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
                                    ar->state);
                        ret = -EIO;
                        break;
                }
        }

        mutex_unlock(&ar->conf_mutex);
        return ret;
}

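/* Advertise WoW support to mac80211: only when the firmware reports
 * WMI_TLV_SERVICE_WOW, with net-detect added when WMI_TLV_SERVICE_NLO is
 * available, and with the maximum pattern length and packet offset reduced
 * by WOW_MAX_REDUCE in native wifi decap mode to account for the larger
 * 802.11 + LLC/SNAP header used by converted patterns.
 */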
int ath11k_wow_init(struct ath11k *ar)
{
        if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
                return 0;

        ar->wow.wowlan_support = ath11k_wowlan_support;

        if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
            ATH11K_HW_TXRX_NATIVE_WIFI) {
                ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
                ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
        }

        if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
                ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
                ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
        }

        ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
        ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
        ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

        device_set_wakeup_capable(ar->ab->dev, true);

        return 0;
}