// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include "mac.h"

#include <net/mac80211.h>
#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "wmi-ops.h"

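/*
 * WoWLAN triggers advertised to mac80211 by default; ath10k_wow_init()
 * copies and adjusts them according to firmware services and decap mode.
 */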
static const struct wiphy_wowlan_support ath10k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

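/*
 * Disable every wakeup event and delete every pattern slot on one vdev so
 * suspend always starts from a clean firmware state.
 */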
static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

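/* Run the per-vdev wakeup and pattern cleanup on all interfaces. */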
static int ath10k_wow_cleanup(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_wow_vif_cleanup(arvif);
		if (ret) {
			ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/*
 * Convert an 802.3 format pattern into an 802.11 format pattern.
 *
 *         +------------+-----------+--------+----------------+
 * 802.3:  |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *         +------------+-----------+--------+----------------+
 *               |__         |_______           |____________  |________
 *                  |                |                        |          |
 *         +--+------------+----+-----------+---------------+--------+----------+
 * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)|      8B       |type(2B)|  body... |
 *         +--+------------+----+-----------+---------------+--------+----------+
 */
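/*
 * Worked example of the offset remapping done below: a pattern byte at
 * 802.3 offset 0 (first byte of the destination MAC) lands at 802.11
 * offset 4, i.e. offsetof(struct ieee80211_hdr_3addr, addr1), and the
 * EtherType is matched against the SNAP type field of the RFC 1042
 * header appended after the 802.11 header.
 */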
static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
					      const struct cfg80211_pkt_pattern *old)
{
	u8 hdr_8023_pattern[ETH_HLEN] = {};
	u8 hdr_8023_bit_mask[ETH_HLEN] = {};
	u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
	u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};

	int total_len = old->pkt_offset + old->pattern_len;
	int hdr_80211_end_offset;

	struct ieee80211_hdr_3addr *new_hdr_pattern =
		(struct ieee80211_hdr_3addr *)hdr_80211_pattern;
	struct ieee80211_hdr_3addr *new_hdr_mask =
		(struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
	struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
	struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
	int hdr_len = sizeof(*new_hdr_pattern);

	struct rfc1042_hdr *new_rfc_pattern =
		(struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
	struct rfc1042_hdr *new_rfc_mask =
		(struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
	int rfc_len = sizeof(*new_rfc_pattern);

	memcpy(hdr_8023_pattern + old->pkt_offset,
	       old->pattern, ETH_HLEN - old->pkt_offset);
	memcpy(hdr_8023_bit_mask + old->pkt_offset,
	       old->mask, ETH_HLEN - old->pkt_offset);

	/* Copy destination address */
	memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
	memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);

	/* Copy source address */
	memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
	memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);

	/* Copy logic link type */
	memcpy(&new_rfc_pattern->snap_type,
	       &old_hdr_pattern->h_proto,
	       sizeof(old_hdr_pattern->h_proto));
	memcpy(&new_rfc_mask->snap_type,
	       &old_hdr_mask->h_proto,
	       sizeof(old_hdr_mask->h_proto));

	/* Calculate new pkt_offset */
	if (old->pkt_offset < ETH_ALEN)
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr1);
	else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
		new->pkt_offset = old->pkt_offset +
			offsetof(struct ieee80211_hdr_3addr, addr3) -
			offsetof(struct ethhdr, h_source);
	else
		new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

	/* Calculate new hdr end offset */
	if (total_len > ETH_HLEN)
		hdr_80211_end_offset = hdr_len + rfc_len;
	else if (total_len > offsetof(struct ethhdr, h_proto))
		hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
	else if (total_len > ETH_ALEN)
		hdr_80211_end_offset = total_len - ETH_ALEN +
			offsetof(struct ieee80211_hdr_3addr, addr3);
	else
		hdr_80211_end_offset = total_len +
			offsetof(struct ieee80211_hdr_3addr, addr1);

	new->pattern_len = hdr_80211_end_offset - new->pkt_offset;

	memcpy((u8 *)new->pattern,
	       hdr_80211_pattern + new->pkt_offset,
	       new->pattern_len);
	memcpy((u8 *)new->mask,
	       hdr_80211_bit_mask + new->pkt_offset,
	       new->pattern_len);

	if (total_len > ETH_HLEN) {
		/* Copy frame body */
		memcpy((u8 *)new->pattern + new->pattern_len,
		       (void *)old->pattern + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);
		memcpy((u8 *)new->mask + new->pattern_len,
		       (void *)old->mask + ETH_HLEN - old->pkt_offset,
		       total_len - ETH_HLEN);

		new->pattern_len += total_len - ETH_HLEN;
	}
}

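/*
 * Translate a cfg80211 scheduled scan (net-detect) request into a WMI PNO
 * scan request, rejecting configurations that exceed firmware limits.
 */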
static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
				struct cfg80211_sched_scan_request *nd_config,
				struct wmi_pno_scan_req *pno)
{
	int i, j, ret = 0;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption = 0;
		pno->a_networks[i].bcast_nw_type = 0;

		/* Copying list of valid channels into the request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
					nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		j = 0;
		while (j < pno->uc_networks_count) {
			if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
			    nd_config->ssids[i].ssid_len &&
			    (memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
			j++;
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath10k_warn(ar, "invalid number of scan plans %d\n",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return ret;
}

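/*
 * Program the wakeup triggers requested for this vdev: protocol events based
 * on the vdev type, an optional PNO (net-detect) scan and any packet
 * patterns, then enable the corresponding wakeup events in firmware.
 */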
static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	int ret, i;
	unsigned long wow_mask = 0;
	struct ath10k *ar = arvif->ar;
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int pattern_id = 0;

	/* Setup requested WOW features */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
						   wowlan->nd_config, pno);
			if (!ret) {
				ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
		u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
		struct cfg80211_pkt_pattern new_pattern = {};
		struct cfg80211_pkt_pattern old_pattern = patterns[i];
		int j;

		new_pattern.pattern = ath_pattern;
		new_pattern.mask = ath_bitmask;
		if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
			continue;
		/* convert bytemask to bitmask */
		for (j = 0; j < patterns[i].pattern_len; j++)
			if (patterns[i].mask[j / 8] & BIT(j % 8))
				bitmask[j] = 0xff;
		old_pattern.mask = bitmask;
		new_pattern = old_pattern;

		if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
			if (patterns[i].pkt_offset < ETH_HLEN)
				ath10k_wow_convert_8023_to_80211(&new_pattern,
								 &old_pattern);
			else
				new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
		}

		if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.mask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

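/* Apply the requested WoW wakeup configuration to every active vdev. */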
static int ath10k_wow_set_wakeups(struct ath10k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath10k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
		if (ret) {
			ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

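/*
 * Disable the PNO scan on a station vdev if net-detect was enabled while
 * suspending.
 */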
static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
{
	int ret = 0;
	struct ath10k *ar = arvif->ar;

	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (ar->nlo_enabled) {
			struct wmi_pno_scan_req *pno;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			pno->enable = 0;
			ar->nlo_enabled = false;
			ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
			kfree(pno);
		}
		break;
	default:
		break;
	}

	return ret;
}

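/* Clear net-detect (NLO) state on all vdevs after resume. */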
static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath10k_vif_wow_clean_nlo(arvif);
		if (ret) {
			ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

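/*
 * Issue the WMI WoW enable command and wait up to three seconds for the
 * target_suspend completion before treating the device as suspended.
 */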
static int ath10k_wow_enable(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->target_suspend);

	ret = ath10k_wmi_wow_enable(ar);
	if (ret) {
		ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
	if (ret == 0) {
		ath10k_warn(ar, "timed out while waiting for suspend completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ath10k_wow_wakeup(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->wow.wakeup_completed);

	ret = ath10k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}

int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	ret = ath10k_wow_cleanup(ar);
	if (ret) {
		ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath10k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ath10k_mac_wait_tx_complete(ar);

	ret = ath10k_wow_enable(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ret = ath10k_hif_suspend(ar);
	if (ret) {
		ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath10k_wow_wakeup(ar);

cleanup:
	ath10k_wow_cleanup(ar);

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret ? 1 : 0;
}

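/* mac80211 set_wakeup handler: propagate the wakeup setting to the device. */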
void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath10k *ar = hw->priv;

	mutex_lock(&ar->conf_mutex);
	if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
		     ar->running_fw->fw_file.fw_features)) {
		device_set_wakeup_enable(ar->dev, enabled);
	}
	mutex_unlock(&ar->conf_mutex);
}

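/*
 * mac80211 resume handler: resume the HIF, wake the firmware out of WoW and
 * drop any net-detect configuration; on failure either flag the device for
 * restart or report the state as unrecoverable.
 */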
int ath10k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath10k *ar = hw->priv;
	int ret;

	mutex_lock(&ar->conf_mutex);

	if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
			      ar->running_fw->fw_file.fw_features))) {
		ret = 1;
		goto exit;
	}

	ret = ath10k_hif_resume(ar);
	if (ret) {
		ath10k_warn(ar, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ret = ath10k_wow_wakeup(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath10k_wow_nlo_cleanup(ar);
	if (ret)
		ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);

exit:
	if (ret) {
		switch (ar->state) {
		case ATH10K_STATE_ON:
			ar->state = ATH10K_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH10K_STATE_OFF:
		case ATH10K_STATE_RESTARTING:
		case ATH10K_STATE_RESTARTED:
		case ATH10K_STATE_UTF:
		case ATH10K_STATE_WEDGED:
			ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
				    ar->state);
			ret = -EIO;
			break;
		}
	}

	mutex_unlock(&ar->conf_mutex);
	return ret;
}

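/*
 * Advertise WoWLAN support to mac80211, sized according to the firmware's
 * WMI services, pattern limits and rx decap mode.
 */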
int ath10k_wow_init(struct ath10k *ar)
{
	if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
		return -EINVAL;

	ar->wow.wowlan_support = ath10k_wowlan_support;

	if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->dev, true);

	return 0;
}