// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */
10 #include <net/mac80211.h>
17 static const struct wiphy_wowlan_support ath10k_wowlan_support
= {
18 .flags
= WIPHY_WOWLAN_DISCONNECT
|
19 WIPHY_WOWLAN_MAGIC_PKT
,
20 .pattern_min_len
= WOW_MIN_PATTERN_SIZE
,
21 .pattern_max_len
= WOW_MAX_PATTERN_SIZE
,
22 .max_pkt_offset
= WOW_MAX_PKT_OFFSET
,
25 static int ath10k_wow_vif_cleanup(struct ath10k_vif
*arvif
)
27 struct ath10k
*ar
= arvif
->ar
;
30 for (i
= 0; i
< WOW_EVENT_MAX
; i
++) {
31 ret
= ath10k_wmi_wow_add_wakeup_event(ar
, arvif
->vdev_id
, i
, 0);
33 ath10k_warn(ar
, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
34 wow_wakeup_event(i
), arvif
->vdev_id
, ret
);
39 for (i
= 0; i
< ar
->wow
.max_num_patterns
; i
++) {
40 ret
= ath10k_wmi_wow_del_pattern(ar
, arvif
->vdev_id
, i
);
42 ath10k_warn(ar
, "failed to delete wow pattern %d for vdev %i: %d\n",
43 i
, arvif
->vdev_id
, ret
);
51 static int ath10k_wow_cleanup(struct ath10k
*ar
)
53 struct ath10k_vif
*arvif
;
56 lockdep_assert_held(&ar
->conf_mutex
);
58 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
59 ret
= ath10k_wow_vif_cleanup(arvif
);
61 ath10k_warn(ar
, "failed to clean wow wakeups on vdev %i: %d\n",
/*
 * Convert a 802.3 format to a 802.11 format.
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *                |__         |_______      |____________  |________
 *                   |                |                  |          |
 *         +--+------------+----+-----------+---------------+-----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|     8B        |type(2B)|     body...    |
 *         +--+------------+----+-----------+---------------+-----------+
 */
81 static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern
*new,
82 const struct cfg80211_pkt_pattern
*old
)
84 u8 hdr_8023_pattern
[ETH_HLEN
] = {};
85 u8 hdr_8023_bit_mask
[ETH_HLEN
] = {};
86 u8 hdr_80211_pattern
[WOW_HDR_LEN
] = {};
87 u8 hdr_80211_bit_mask
[WOW_HDR_LEN
] = {};
89 int total_len
= old
->pkt_offset
+ old
->pattern_len
;
90 int hdr_80211_end_offset
;
92 struct ieee80211_hdr_3addr
*new_hdr_pattern
=
93 (struct ieee80211_hdr_3addr
*)hdr_80211_pattern
;
94 struct ieee80211_hdr_3addr
*new_hdr_mask
=
95 (struct ieee80211_hdr_3addr
*)hdr_80211_bit_mask
;
96 struct ethhdr
*old_hdr_pattern
= (struct ethhdr
*)hdr_8023_pattern
;
97 struct ethhdr
*old_hdr_mask
= (struct ethhdr
*)hdr_8023_bit_mask
;
98 int hdr_len
= sizeof(*new_hdr_pattern
);
100 struct rfc1042_hdr
*new_rfc_pattern
=
101 (struct rfc1042_hdr
*)(hdr_80211_pattern
+ hdr_len
);
102 struct rfc1042_hdr
*new_rfc_mask
=
103 (struct rfc1042_hdr
*)(hdr_80211_bit_mask
+ hdr_len
);
104 int rfc_len
= sizeof(*new_rfc_pattern
);
106 memcpy(hdr_8023_pattern
+ old
->pkt_offset
,
107 old
->pattern
, ETH_HLEN
- old
->pkt_offset
);
108 memcpy(hdr_8023_bit_mask
+ old
->pkt_offset
,
109 old
->mask
, ETH_HLEN
- old
->pkt_offset
);
111 /* Copy destination address */
112 memcpy(new_hdr_pattern
->addr1
, old_hdr_pattern
->h_dest
, ETH_ALEN
);
113 memcpy(new_hdr_mask
->addr1
, old_hdr_mask
->h_dest
, ETH_ALEN
);
115 /* Copy source address */
116 memcpy(new_hdr_pattern
->addr3
, old_hdr_pattern
->h_source
, ETH_ALEN
);
117 memcpy(new_hdr_mask
->addr3
, old_hdr_mask
->h_source
, ETH_ALEN
);
119 /* Copy logic link type */
120 memcpy(&new_rfc_pattern
->snap_type
,
121 &old_hdr_pattern
->h_proto
,
122 sizeof(old_hdr_pattern
->h_proto
));
123 memcpy(&new_rfc_mask
->snap_type
,
124 &old_hdr_mask
->h_proto
,
125 sizeof(old_hdr_mask
->h_proto
));
127 /* Calculate new pkt_offset */
128 if (old
->pkt_offset
< ETH_ALEN
)
129 new->pkt_offset
= old
->pkt_offset
+
130 offsetof(struct ieee80211_hdr_3addr
, addr1
);
131 else if (old
->pkt_offset
< offsetof(struct ethhdr
, h_proto
))
132 new->pkt_offset
= old
->pkt_offset
+
133 offsetof(struct ieee80211_hdr_3addr
, addr3
) -
134 offsetof(struct ethhdr
, h_source
);
136 new->pkt_offset
= old
->pkt_offset
+ hdr_len
+ rfc_len
- ETH_HLEN
;
138 /* Calculate new hdr end offset */
139 if (total_len
> ETH_HLEN
)
140 hdr_80211_end_offset
= hdr_len
+ rfc_len
;
141 else if (total_len
> offsetof(struct ethhdr
, h_proto
))
142 hdr_80211_end_offset
= hdr_len
+ rfc_len
+ total_len
- ETH_HLEN
;
143 else if (total_len
> ETH_ALEN
)
144 hdr_80211_end_offset
= total_len
- ETH_ALEN
+
145 offsetof(struct ieee80211_hdr_3addr
, addr3
);
147 hdr_80211_end_offset
= total_len
+
148 offsetof(struct ieee80211_hdr_3addr
, addr1
);
150 new->pattern_len
= hdr_80211_end_offset
- new->pkt_offset
;
152 memcpy((u8
*)new->pattern
,
153 hdr_80211_pattern
+ new->pkt_offset
,
155 memcpy((u8
*)new->mask
,
156 hdr_80211_bit_mask
+ new->pkt_offset
,
159 if (total_len
> ETH_HLEN
) {
160 /* Copy frame body */
161 memcpy((u8
*)new->pattern
+ new->pattern_len
,
162 (void *)old
->pattern
+ ETH_HLEN
- old
->pkt_offset
,
163 total_len
- ETH_HLEN
);
164 memcpy((u8
*)new->mask
+ new->pattern_len
,
165 (void *)old
->mask
+ ETH_HLEN
- old
->pkt_offset
,
166 total_len
- ETH_HLEN
);
168 new->pattern_len
+= total_len
- ETH_HLEN
;
172 static int ath10k_wmi_pno_check(struct ath10k
*ar
, u32 vdev_id
,
173 struct cfg80211_sched_scan_request
*nd_config
,
174 struct wmi_pno_scan_req
*pno
)
180 pno
->vdev_id
= vdev_id
;
181 pno
->uc_networks_count
= nd_config
->n_match_sets
;
183 if (!pno
->uc_networks_count
||
184 pno
->uc_networks_count
> WMI_PNO_MAX_SUPP_NETWORKS
)
187 if (nd_config
->n_channels
> WMI_PNO_MAX_NETW_CHANNELS_EX
)
190 /* Filling per profile params */
191 for (i
= 0; i
< pno
->uc_networks_count
; i
++) {
192 ssid_len
= nd_config
->match_sets
[i
].ssid
.ssid_len
;
194 if (ssid_len
== 0 || ssid_len
> 32)
197 pno
->a_networks
[i
].ssid
.ssid_len
= __cpu_to_le32(ssid_len
);
199 memcpy(pno
->a_networks
[i
].ssid
.ssid
,
200 nd_config
->match_sets
[i
].ssid
.ssid
,
201 nd_config
->match_sets
[i
].ssid
.ssid_len
);
202 pno
->a_networks
[i
].authentication
= 0;
203 pno
->a_networks
[i
].encryption
= 0;
204 pno
->a_networks
[i
].bcast_nw_type
= 0;
206 /*Copying list of valid channel into request */
207 pno
->a_networks
[i
].channel_count
= nd_config
->n_channels
;
208 pno
->a_networks
[i
].rssi_threshold
= nd_config
->match_sets
[i
].rssi_thold
;
210 for (j
= 0; j
< nd_config
->n_channels
; j
++) {
211 pno
->a_networks
[i
].channels
[j
] =
212 nd_config
->channels
[j
]->center_freq
;
216 /* set scan to passive if no SSIDs are specified in the request */
217 if (nd_config
->n_ssids
== 0)
218 pno
->do_passive_scan
= true;
220 pno
->do_passive_scan
= false;
222 for (i
= 0; i
< nd_config
->n_ssids
; i
++) {
224 while (j
< pno
->uc_networks_count
) {
225 if (__le32_to_cpu(pno
->a_networks
[j
].ssid
.ssid_len
) ==
226 nd_config
->ssids
[i
].ssid_len
&&
227 (memcmp(pno
->a_networks
[j
].ssid
.ssid
,
228 nd_config
->ssids
[i
].ssid
,
229 __le32_to_cpu(pno
->a_networks
[j
].ssid
.ssid_len
)) == 0)) {
230 pno
->a_networks
[j
].bcast_nw_type
= BCAST_HIDDEN
;
237 if (nd_config
->n_scan_plans
== 2) {
238 pno
->fast_scan_period
= nd_config
->scan_plans
[0].interval
* MSEC_PER_SEC
;
239 pno
->fast_scan_max_cycles
= nd_config
->scan_plans
[0].iterations
;
240 pno
->slow_scan_period
=
241 nd_config
->scan_plans
[1].interval
* MSEC_PER_SEC
;
242 } else if (nd_config
->n_scan_plans
== 1) {
243 pno
->fast_scan_period
= nd_config
->scan_plans
[0].interval
* MSEC_PER_SEC
;
244 pno
->fast_scan_max_cycles
= 1;
245 pno
->slow_scan_period
= nd_config
->scan_plans
[0].interval
* MSEC_PER_SEC
;
247 ath10k_warn(ar
, "Invalid number of scan plans %d !!",
248 nd_config
->n_scan_plans
);
251 if (nd_config
->flags
& NL80211_SCAN_FLAG_RANDOM_ADDR
) {
252 /* enable mac randomization */
253 pno
->enable_pno_scan_randomization
= 1;
254 memcpy(pno
->mac_addr
, nd_config
->mac_addr
, ETH_ALEN
);
255 memcpy(pno
->mac_addr_mask
, nd_config
->mac_addr_mask
, ETH_ALEN
);
258 pno
->delay_start_time
= nd_config
->delay
;
260 /* Current FW does not support min-max range for dwell time */
261 pno
->active_max_time
= WMI_ACTIVE_MAX_CHANNEL_TIME
;
262 pno
->passive_max_time
= WMI_PASSIVE_MAX_CHANNEL_TIME
;
266 static int ath10k_vif_wow_set_wakeups(struct ath10k_vif
*arvif
,
267 struct cfg80211_wowlan
*wowlan
)
270 unsigned long wow_mask
= 0;
271 struct ath10k
*ar
= arvif
->ar
;
272 const struct cfg80211_pkt_pattern
*patterns
= wowlan
->patterns
;
275 /* Setup requested WOW features */
276 switch (arvif
->vdev_type
) {
277 case WMI_VDEV_TYPE_IBSS
:
278 __set_bit(WOW_BEACON_EVENT
, &wow_mask
);
280 case WMI_VDEV_TYPE_AP
:
281 __set_bit(WOW_DEAUTH_RECVD_EVENT
, &wow_mask
);
282 __set_bit(WOW_DISASSOC_RECVD_EVENT
, &wow_mask
);
283 __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT
, &wow_mask
);
284 __set_bit(WOW_AUTH_REQ_EVENT
, &wow_mask
);
285 __set_bit(WOW_ASSOC_REQ_EVENT
, &wow_mask
);
286 __set_bit(WOW_HTT_EVENT
, &wow_mask
);
287 __set_bit(WOW_RA_MATCH_EVENT
, &wow_mask
);
289 case WMI_VDEV_TYPE_STA
:
290 if (wowlan
->disconnect
) {
291 __set_bit(WOW_DEAUTH_RECVD_EVENT
, &wow_mask
);
292 __set_bit(WOW_DISASSOC_RECVD_EVENT
, &wow_mask
);
293 __set_bit(WOW_BMISS_EVENT
, &wow_mask
);
294 __set_bit(WOW_CSA_IE_EVENT
, &wow_mask
);
297 if (wowlan
->magic_pkt
)
298 __set_bit(WOW_MAGIC_PKT_RECVD_EVENT
, &wow_mask
);
300 if (wowlan
->nd_config
) {
301 struct wmi_pno_scan_req
*pno
;
304 pno
= kzalloc(sizeof(*pno
), GFP_KERNEL
);
308 ar
->nlo_enabled
= true;
310 ret
= ath10k_wmi_pno_check(ar
, arvif
->vdev_id
,
311 wowlan
->nd_config
, pno
);
313 ath10k_wmi_wow_config_pno(ar
, arvif
->vdev_id
, pno
);
314 __set_bit(WOW_NLO_DETECTED_EVENT
, &wow_mask
);
324 for (i
= 0; i
< wowlan
->n_patterns
; i
++) {
325 u8 bitmask
[WOW_MAX_PATTERN_SIZE
] = {};
326 u8 ath_pattern
[WOW_MAX_PATTERN_SIZE
] = {};
327 u8 ath_bitmask
[WOW_MAX_PATTERN_SIZE
] = {};
328 struct cfg80211_pkt_pattern new_pattern
= {};
329 struct cfg80211_pkt_pattern old_pattern
= patterns
[i
];
332 new_pattern
.pattern
= ath_pattern
;
333 new_pattern
.mask
= ath_bitmask
;
334 if (patterns
[i
].pattern_len
> WOW_MAX_PATTERN_SIZE
)
336 /* convert bytemask to bitmask */
337 for (j
= 0; j
< patterns
[i
].pattern_len
; j
++)
338 if (patterns
[i
].mask
[j
/ 8] & BIT(j
% 8))
340 old_pattern
.mask
= bitmask
;
342 if (ar
->wmi
.rx_decap_mode
== ATH10K_HW_TXRX_NATIVE_WIFI
) {
343 if (patterns
[i
].pkt_offset
< ETH_HLEN
) {
344 ath10k_wow_convert_8023_to_80211(&new_pattern
,
347 new_pattern
= old_pattern
;
348 new_pattern
.pkt_offset
+= WOW_HDR_LEN
- ETH_HLEN
;
352 if (WARN_ON(new_pattern
.pattern_len
> WOW_MAX_PATTERN_SIZE
))
355 ret
= ath10k_wmi_wow_add_pattern(ar
, arvif
->vdev_id
,
359 new_pattern
.pattern_len
,
360 new_pattern
.pkt_offset
);
362 ath10k_warn(ar
, "failed to add pattern %i to vdev %i: %d\n",
364 arvif
->vdev_id
, ret
);
369 __set_bit(WOW_PATTERN_MATCH_EVENT
, &wow_mask
);
372 for (i
= 0; i
< WOW_EVENT_MAX
; i
++) {
373 if (!test_bit(i
, &wow_mask
))
375 ret
= ath10k_wmi_wow_add_wakeup_event(ar
, arvif
->vdev_id
, i
, 1);
377 ath10k_warn(ar
, "failed to enable wakeup event %s on vdev %i: %d\n",
378 wow_wakeup_event(i
), arvif
->vdev_id
, ret
);
386 static int ath10k_wow_set_wakeups(struct ath10k
*ar
,
387 struct cfg80211_wowlan
*wowlan
)
389 struct ath10k_vif
*arvif
;
392 lockdep_assert_held(&ar
->conf_mutex
);
394 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
395 ret
= ath10k_vif_wow_set_wakeups(arvif
, wowlan
);
397 ath10k_warn(ar
, "failed to set wow wakeups on vdev %i: %d\n",
398 arvif
->vdev_id
, ret
);
406 static int ath10k_vif_wow_clean_nlo(struct ath10k_vif
*arvif
)
409 struct ath10k
*ar
= arvif
->ar
;
411 switch (arvif
->vdev_type
) {
412 case WMI_VDEV_TYPE_STA
:
413 if (ar
->nlo_enabled
) {
414 struct wmi_pno_scan_req
*pno
;
416 pno
= kzalloc(sizeof(*pno
), GFP_KERNEL
);
421 ar
->nlo_enabled
= false;
422 ret
= ath10k_wmi_wow_config_pno(ar
, arvif
->vdev_id
, pno
);
432 static int ath10k_wow_nlo_cleanup(struct ath10k
*ar
)
434 struct ath10k_vif
*arvif
;
437 lockdep_assert_held(&ar
->conf_mutex
);
439 list_for_each_entry(arvif
, &ar
->arvifs
, list
) {
440 ret
= ath10k_vif_wow_clean_nlo(arvif
);
442 ath10k_warn(ar
, "failed to clean nlo settings on vdev %i: %d\n",
443 arvif
->vdev_id
, ret
);
451 static int ath10k_wow_enable(struct ath10k
*ar
)
455 lockdep_assert_held(&ar
->conf_mutex
);
457 reinit_completion(&ar
->target_suspend
);
459 ret
= ath10k_wmi_wow_enable(ar
);
461 ath10k_warn(ar
, "failed to issue wow enable: %d\n", ret
);
465 ret
= wait_for_completion_timeout(&ar
->target_suspend
, 3 * HZ
);
467 ath10k_warn(ar
, "timed out while waiting for suspend completion\n");
474 static int ath10k_wow_wakeup(struct ath10k
*ar
)
478 lockdep_assert_held(&ar
->conf_mutex
);
480 reinit_completion(&ar
->wow
.wakeup_completed
);
482 ret
= ath10k_wmi_wow_host_wakeup_ind(ar
);
484 ath10k_warn(ar
, "failed to send wow wakeup indication: %d\n",
489 ret
= wait_for_completion_timeout(&ar
->wow
.wakeup_completed
, 3 * HZ
);
491 ath10k_warn(ar
, "timed out while waiting for wow wakeup completion\n");
498 int ath10k_wow_op_suspend(struct ieee80211_hw
*hw
,
499 struct cfg80211_wowlan
*wowlan
)
501 struct ath10k
*ar
= hw
->priv
;
504 mutex_lock(&ar
->conf_mutex
);
506 if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT
,
507 ar
->running_fw
->fw_file
.fw_features
))) {
512 ret
= ath10k_wow_cleanup(ar
);
514 ath10k_warn(ar
, "failed to clear wow wakeup events: %d\n",
519 ret
= ath10k_wow_set_wakeups(ar
, wowlan
);
521 ath10k_warn(ar
, "failed to set wow wakeup events: %d\n",
526 ath10k_mac_wait_tx_complete(ar
);
528 ret
= ath10k_wow_enable(ar
);
530 ath10k_warn(ar
, "failed to start wow: %d\n", ret
);
534 ret
= ath10k_hif_suspend(ar
);
536 ath10k_warn(ar
, "failed to suspend hif: %d\n", ret
);
543 ath10k_wow_wakeup(ar
);
546 ath10k_wow_cleanup(ar
);
549 mutex_unlock(&ar
->conf_mutex
);
553 void ath10k_wow_op_set_wakeup(struct ieee80211_hw
*hw
, bool enabled
)
555 struct ath10k
*ar
= hw
->priv
;
557 mutex_lock(&ar
->conf_mutex
);
558 if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT
,
559 ar
->running_fw
->fw_file
.fw_features
)) {
560 device_set_wakeup_enable(ar
->dev
, enabled
);
562 mutex_unlock(&ar
->conf_mutex
);
565 int ath10k_wow_op_resume(struct ieee80211_hw
*hw
)
567 struct ath10k
*ar
= hw
->priv
;
570 mutex_lock(&ar
->conf_mutex
);
572 if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT
,
573 ar
->running_fw
->fw_file
.fw_features
))) {
578 ret
= ath10k_hif_resume(ar
);
580 ath10k_warn(ar
, "failed to resume hif: %d\n", ret
);
584 ret
= ath10k_wow_wakeup(ar
);
586 ath10k_warn(ar
, "failed to wakeup from wow: %d\n", ret
);
588 ret
= ath10k_wow_nlo_cleanup(ar
);
590 ath10k_warn(ar
, "failed to cleanup nlo: %d\n", ret
);
595 case ATH10K_STATE_ON
:
596 ar
->state
= ATH10K_STATE_RESTARTING
;
599 case ATH10K_STATE_OFF
:
600 case ATH10K_STATE_RESTARTING
:
601 case ATH10K_STATE_RESTARTED
:
602 case ATH10K_STATE_UTF
:
603 case ATH10K_STATE_WEDGED
:
604 ath10k_warn(ar
, "encountered unexpected device state %d on resume, cannot recover\n",
611 mutex_unlock(&ar
->conf_mutex
);
615 int ath10k_wow_init(struct ath10k
*ar
)
617 if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT
,
618 ar
->running_fw
->fw_file
.fw_features
))
621 if (WARN_ON(!test_bit(WMI_SERVICE_WOW
, ar
->wmi
.svc_map
)))
624 ar
->wow
.wowlan_support
= ath10k_wowlan_support
;
626 if (ar
->wmi
.rx_decap_mode
== ATH10K_HW_TXRX_NATIVE_WIFI
) {
627 ar
->wow
.wowlan_support
.pattern_max_len
-= WOW_MAX_REDUCE
;
628 ar
->wow
.wowlan_support
.max_pkt_offset
-= WOW_MAX_REDUCE
;
631 if (test_bit(WMI_SERVICE_NLO
, ar
->wmi
.svc_map
)) {
632 ar
->wow
.wowlan_support
.flags
|= WIPHY_WOWLAN_NET_DETECT
;
633 ar
->wow
.wowlan_support
.max_nd_match_sets
= WMI_PNO_MAX_SUPP_NETWORKS
;
636 ar
->wow
.wowlan_support
.n_patterns
= ar
->wow
.max_num_patterns
;
637 ar
->hw
->wiphy
->wowlan
= &ar
->wow
.wowlan_support
;
639 device_set_wakeup_capable(ar
->dev
, true);