// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <net/if_inet6.h>
#include <net/ipv6.h>

#include "mac.h"

#include <net/mac80211.h>
#include "core.h"
#include "hif.h"
#include "debug.h"
#include "wmi.h"
#include "wow.h"

static const struct wiphy_wowlan_support ath12k_wowlan_support = {
	.flags = WIPHY_WOWLAN_DISCONNECT |
		 WIPHY_WOWLAN_MAGIC_PKT |
		 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
		 WIPHY_WOWLAN_GTK_REKEY_FAILURE,
	.pattern_min_len = WOW_MIN_PATTERN_SIZE,
	.pattern_max_len = WOW_MAX_PATTERN_SIZE,
	.max_pkt_offset = WOW_MAX_PKT_OFFSET,
};

static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *ahvif)
{
	return (ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
		ahvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
}
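
/* Ask the firmware to enter WoW. ath12k_wmi_wow_enable() sends the WMI
 * enable command; a busy firmware replies with ATH12K_HTC_MSG_NACK_SUSPEND
 * and the loop below simply retries, while a successful suspend is
 * signalled through the htc_suspend completion and
 * ATH12K_FLAG_HTC_SUSPEND_COMPLETE.
 */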
int ath12k_wow_enable(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int i, ret;

	clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);

	/* The firmware might be busy and it can not enter WoW immediately.
	 * In that case firmware notifies host with
	 * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again
	 * later. Per the firmware team there could be up to 10 loops.
	 */
	for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
		reinit_completion(&ab->htc_suspend);

		ret = ath12k_wmi_wow_enable(ar);
		if (ret) {
			ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
			return ret;
		}

		ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
		if (ret == 0) {
			ath12k_warn(ab,
				    "timed out while waiting for htc suspend completion\n");
			return -ETIMEDOUT;
		}

		if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
			/* success, suspend complete received */
			return 0;

		ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
			    i);
		msleep(ATH12K_WOW_RETRY_WAIT_MS);
	}

	ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);

	return -ETIMEDOUT;
}

int ath12k_wow_wakeup(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	reinit_completion(&ab->wow.wakeup_completed);

	ret = ath12k_wmi_wow_host_wakeup_ind(ar);
	if (ret) {
		ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
			    ret);
		return ret;
	}

	ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
	if (ret == 0) {
		ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
		return -ETIMEDOUT;
	}

	return 0;
}
static int ath12k_wow_vif_cleanup(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	int i, ret;

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
		if (ret) {
			ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	for (i = 0; i < ar->wow.max_num_patterns; i++) {
		ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
		if (ret) {
			ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
				    i, arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_wow_vif_cleanup(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

/* Convert an 802.3 format to an 802.11 format.
 *             +------------+-----------+--------+----------------+
 * 802.3:      |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
 *             +------------+-----------+--------+----------------+
 *                    |__         |_______       |____________  |________
 *                       |                |                   |          |
 *             +--+------------+----+-----------+---------------+-----------+
 * 802.11:     |4B|dest mac(6B)| 6B |src mac(6B)|     8B        |type(2B)|  body...  |
 *             +--+------------+----+-----------+---------------+-----------+
 */
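
/* Worked example, derived from the offsets used below: with the 24 byte
 * ieee80211_hdr_3addr and the RFC1042 (LLC/SNAP) header whose eth_type
 * field sits 6 bytes in, the destination MAC at 802.3 offset 0 maps to
 * addr1 (offset 4), the source MAC at offset 6 maps to addr3 (offset 16)
 * and the type field at offset 12 maps to offset 24 + 6 = 30. Patterns
 * spanning these boundaries are split up by the branches below.
 */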
static void
ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
				 const struct cfg80211_pkt_pattern *eth_pattern,
				 struct ath12k_pkt_pattern *i80211_pattern)
{
	size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
	size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
	size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
	size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
	size_t prot_ofs = offsetof(struct ethhdr, h_proto);
	size_t src_ofs = offsetof(struct ethhdr, h_source);
	u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
	const u8 *eth_pat = eth_pattern->pattern;
	size_t eth_pat_len = eth_pattern->pattern_len;
	size_t eth_pkt_ofs = eth_pattern->pkt_offset;
	u8 *bytemask = i80211_pattern->bytemask;
	u8 *pat = i80211_pattern->pattern;
	size_t pat_len;
	size_t pkt_ofs;
	size_t delta;
	int i;

	/* convert bitmask to bytemask */
	for (i = 0; i < eth_pat_len; i++)
		if (eth_pattern->mask[i / 8] & BIT(i % 8))
			eth_bytemask[i] = 0xff;

	if (eth_pkt_ofs < ETH_ALEN) {
		pkt_ofs = eth_pkt_ofs + a1_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - src_ofs;
			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       delta);

			pat_len = a3_ofs - pkt_ofs + delta;
		} else {
			memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);

			memcpy(pat + a3_ofs - pkt_ofs,
			       eth_pat + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);
			memcpy(bytemask + a3_ofs - pkt_ofs,
			       eth_bytemask + ETH_ALEN - eth_pkt_ofs,
			       ETH_ALEN);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else if (eth_pkt_ofs < prot_ofs) {
		pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;

		if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
			memcpy(pat, eth_pat, eth_pat_len);
			memcpy(bytemask, eth_bytemask, eth_pat_len);

			pat_len = eth_pat_len;
		} else {
			memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
			memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);

			delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
			memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_pat + prot_ofs - eth_pkt_ofs,
			       delta);
			memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
			       eth_bytemask + prot_ofs - eth_pkt_ofs,
			       delta);

			pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
		}
	} else {
		pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;

		memcpy(pat, eth_pat, eth_pat_len);
		memcpy(bytemask, eth_bytemask, eth_pat_len);

		pat_len = eth_pat_len;
	}

	i80211_pattern->pattern_len = pat_len;
	i80211_pattern->pkt_offset = pkt_ofs;
}
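
/* Translate the cfg80211 scheduled scan (net-detect) request into the WMI
 * PNO request: copy the match-set SSIDs and channel list, mark SSIDs that
 * also appear in the probe list as hidden, and map the cfg80211 scan plans
 * onto the fast/slow scan periods understood by the firmware.
 */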
static int
ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
				 const struct cfg80211_sched_scan_request *nd_config,
				 struct wmi_pno_scan_req_arg *pno)
{
	int i, j;
	u8 ssid_len;

	pno->enable = 1;
	pno->vdev_id = vdev_id;
	pno->uc_networks_count = nd_config->n_match_sets;

	if (!pno->uc_networks_count ||
	    pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
		return -EINVAL;

	if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
		return -EINVAL;

	/* Filling per profile params */
	for (i = 0; i < pno->uc_networks_count; i++) {
		ssid_len = nd_config->match_sets[i].ssid.ssid_len;

		if (ssid_len == 0 || ssid_len > 32)
			return -EINVAL;

		pno->a_networks[i].ssid.ssid_len = ssid_len;

		memcpy(pno->a_networks[i].ssid.ssid,
		       nd_config->match_sets[i].ssid.ssid,
		       ssid_len);
		pno->a_networks[i].authentication = 0;
		pno->a_networks[i].encryption = 0;
		pno->a_networks[i].bcast_nw_type = 0;

		/* Copying list of valid channel into request */
		pno->a_networks[i].channel_count = nd_config->n_channels;
		pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;

		for (j = 0; j < nd_config->n_channels; j++) {
			pno->a_networks[i].channels[j] =
				nd_config->channels[j]->center_freq;
		}
	}

	/* set scan to passive if no SSIDs are specified in the request */
	if (nd_config->n_ssids == 0)
		pno->do_passive_scan = true;
	else
		pno->do_passive_scan = false;

	for (i = 0; i < nd_config->n_ssids; i++) {
		for (j = 0; j < pno->uc_networks_count; j++) {
			if (pno->a_networks[j].ssid.ssid_len ==
			    nd_config->ssids[i].ssid_len &&
			    !memcmp(pno->a_networks[j].ssid.ssid,
				    nd_config->ssids[i].ssid,
				    pno->a_networks[j].ssid.ssid_len)) {
				pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
				break;
			}
		}
	}

	if (nd_config->n_scan_plans == 2) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
		pno->slow_scan_period =
			nd_config->scan_plans[1].interval * MSEC_PER_SEC;
	} else if (nd_config->n_scan_plans == 1) {
		pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
		pno->fast_scan_max_cycles = 1;
		pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
	} else {
		ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
			    nd_config->n_scan_plans);
	}

	if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
		/* enable mac randomization */
		pno->enable_pno_scan_randomization = 1;
		memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
		memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
	}

	pno->delay_start_time = nd_config->delay;

	/* Current FW does not support min-max range for dwell time */
	pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
	pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;

	return 0;
}
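
/* Program one vdev for WoW: pick the wakeup events matching the vdev type
 * and the user request, install the packet patterns (converted to 802.11
 * layout when the firmware uses native wifi rx decap), and finally enable
 * each selected event in the firmware.
 */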
static int ath12k_wow_vif_set_wakeups(struct ath12k_link_vif *arvif,
				      struct cfg80211_wowlan *wowlan)
{
	const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	struct ath12k *ar = arvif->ar;
	unsigned long wow_mask = 0;
	int pattern_id = 0;
	int ret, i, j;

	/* Setup requested WOW features */
	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_IBSS:
		__set_bit(WOW_BEACON_EVENT, &wow_mask);
		fallthrough;
	case WMI_VDEV_TYPE_AP:
		__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
		__set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
		__set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
		__set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
		__set_bit(WOW_HTT_EVENT, &wow_mask);
		__set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
		break;
	case WMI_VDEV_TYPE_STA:
		if (wowlan->disconnect) {
			__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
			__set_bit(WOW_BMISS_EVENT, &wow_mask);
			__set_bit(WOW_CSA_IE_EVENT, &wow_mask);
		}

		if (wowlan->magic_pkt)
			__set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);

		if (wowlan->nd_config) {
			struct wmi_pno_scan_req_arg *pno;
			int ret;

			pno = kzalloc(sizeof(*pno), GFP_KERNEL);
			if (!pno)
				return -ENOMEM;

			ar->nlo_enabled = true;

			ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
							       wowlan->nd_config, pno);
			if (!ret) {
				ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
				__set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
			}

			kfree(pno);
		}
		break;
	default:
		break;
	}

	for (i = 0; i < wowlan->n_patterns; i++) {
		const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
		struct ath12k_pkt_pattern new_pattern = {};

		if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
			return -EINVAL;

		if (ar->ab->wow.wmi_conf_rx_decap_mode ==
		    ATH12K_HW_TXRX_NATIVE_WIFI) {
			ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
							 &new_pattern);

			if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
				return -EINVAL;
		} else {
			memcpy(new_pattern.pattern, eth_pattern->pattern,
			       eth_pattern->pattern_len);

			/* convert bitmask to bytemask */
			for (j = 0; j < eth_pattern->pattern_len; j++)
				if (eth_pattern->mask[j / 8] & BIT(j % 8))
					new_pattern.bytemask[j] = 0xff;

			new_pattern.pattern_len = eth_pattern->pattern_len;
			new_pattern.pkt_offset = eth_pattern->pkt_offset;
		}

		ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
						 pattern_id,
						 new_pattern.pattern,
						 new_pattern.bytemask,
						 new_pattern.pattern_len,
						 new_pattern.pkt_offset);
		if (ret) {
			ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
				    pattern_id,
				    arvif->vdev_id, ret);
			return ret;
		}

		pattern_id++;
		__set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
	}

	for (i = 0; i < WOW_EVENT_MAX; i++) {
		if (!test_bit(i, &wow_mask))
			continue;
		ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
		if (ret) {
			ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
				    wow_wakeup_event(i), arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_set_wakeups(struct ath12k *ar,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;

		ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
{
	struct wmi_pno_scan_req_arg *pno;
	int ret;

	if (!ar->nlo_enabled)
		return 0;

	pno = kzalloc(sizeof(*pno), GFP_KERNEL);
	if (!pno)
		return -ENOMEM;

	pno->enable = 0;
	ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
	if (ret < 0) {
		ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
		goto out;
	}

	ar->nlo_enabled = false;

out:
	kfree(pno);
	return ret;
}

static int ath12k_wow_vif_clean_nlo(struct ath12k_link_vif *arvif)
{
	struct ath12k *ar = arvif->ar;

	switch (arvif->ahvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
	default:
		return 0;
	}
}

static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (ath12k_wow_is_p2p_vdev(arvif->ahvif))
			continue;

		ret = ath12k_wow_vif_clean_nlo(arvif);
		if (ret) {
			ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_set_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = true;
		arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
{
	struct wmi_hw_data_filter_arg arg;
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		arg.vdev_id = arvif->vdev_id;
		arg.enable = false;
		arg.hw_filter_bitmap = 0;
		ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);

		if (ret) {
			ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	}

	return 0;
}
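
/* Build the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX,
 * i.e. the last three bytes taken from the corresponding unicast/anycast
 * address) for each offloaded address, so the firmware can answer
 * Neighbor Solicitations while the host sleeps.
 */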
static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
					   struct wmi_arp_ns_offload_arg *offload)
{
	int i;

	for (i = 0; i < offload->ipv6_count; i++) {
		offload->self_ipv6_addr[i][0] = 0xff;
		offload->self_ipv6_addr[i][1] = 0x02;
		offload->self_ipv6_addr[i][11] = 0x01;
		offload->self_ipv6_addr[i][12] = 0xff;
		offload->self_ipv6_addr[i][13] =
			offload->ipv6_addr[i][13];
		offload->self_ipv6_addr[i][14] =
			offload->ipv6_addr[i][14];
		offload->self_ipv6_addr[i][15] =
			offload->ipv6_addr[i][15];
		ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
			   offload->self_ipv6_addr[i]);
	}
}

static void ath12k_wow_prepare_ns_offload(struct ath12k_link_vif *arvif,
					  struct wmi_arp_ns_offload_arg *offload)
{
	struct net_device *ndev = ieee80211_vif_to_wdev(arvif->ahvif->vif)->netdev;
	struct ath12k_base *ab = arvif->ar->ab;
	struct inet6_ifaddr *ifa6;
	struct ifacaddr6 *ifaca6;
	struct inet6_dev *idev;
	u32 count = 0, scope;

	if (!ndev)
		return;

	idev = in6_dev_get(ndev);
	if (!idev)
		return;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");

	read_lock_bh(&idev->lock);

	/* get unicast address */
	list_for_each_entry(ifa6, &idev->addr_list, if_list) {
		if (count >= WMI_IPV6_MAX_COUNT)
			goto unlock;

		if (ifa6->flags & IFA_F_DADFAILED)
			continue;

		scope = ipv6_addr_src_scope(&ifa6->addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv6 scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
		       sizeof(ifa6->addr.s6_addr));
		offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	/* get anycast address */
	rcu_read_lock();

	for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
	     ifaca6 = rcu_dereference(ifaca6->aca_next)) {
		if (count >= WMI_IPV6_MAX_COUNT) {
			rcu_read_unlock();
			goto unlock;
		}

		scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
		if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
		    scope != IPV6_ADDR_SCOPE_GLOBAL) {
			ath12k_dbg(ab, ATH12K_DBG_WOW,
				   "Unsupported ipv6 scope: %d\n", scope);
			continue;
		}

		memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
		       sizeof(ifaca6->aca_addr));
		offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
		ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
			   count, offload->ipv6_addr[count],
			   scope);
		count++;
	}

	rcu_read_unlock();

unlock:
	read_unlock_bh(&idev->lock);

	in6_dev_put(idev);

	offload->ipv6_count = count;
	ath12k_wow_generate_ns_mc_addr(ab, offload);
}

static void ath12k_wow_prepare_arp_offload(struct ath12k_link_vif *arvif,
					   struct wmi_arp_ns_offload_arg *offload)
{
	struct ieee80211_vif *vif = arvif->ahvif->vif;
	struct ieee80211_vif_cfg vif_cfg = vif->cfg;
	struct ath12k_base *ab = arvif->ar->ab;
	u32 ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");

	ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
	memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
	offload->ipv4_count = ipv4_cnt;

	ath12k_dbg(ab, ATH12K_DBG_WOW,
		   "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
		   vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
}
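
/* Push the ARP and NS offload configuration for every station vdev so the
 * firmware can answer ARP requests and Neighbor Solicitations on behalf
 * of the host while in WoW.
 */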
static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
{
	struct wmi_arp_ns_offload_arg *offload;
	struct ath12k_link_vif *arvif;
	struct ath12k_vif *ahvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	offload = kmalloc(sizeof(*offload), GFP_KERNEL);
	if (!offload)
		return -ENOMEM;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ahvif = arvif->ahvif;

		if (ahvif->vdev_type != WMI_VDEV_TYPE_STA)
			continue;

		memset(offload, 0, sizeof(*offload));

		memcpy(offload->mac_addr, ahvif->vif->addr, ETH_ALEN);
		ath12k_wow_prepare_ns_offload(arvif, offload);
		ath12k_wow_prepare_arp_offload(arvif, offload);

		ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
		if (ret) {
			ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			kfree(offload);
			return ret;
		}
	}

	kfree(offload);

	return 0;
}

static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->ahvif->vdev_type != WMI_VDEV_TYPE_STA ||
		    !arvif->is_up ||
		    !arvif->rekey_data.enable_offload)
			continue;

		/* get rekey info before disable rekey offload */
		if (!enable) {
			ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
			if (ret) {
				ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
					    arvif->vdev_id, ret);
				return ret;
			}
		}

		ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);

		if (ret) {
			ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n",
				    arvif->vdev_id, enable, ret);
			return ret;
		}
	}

	return 0;
}

static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
{
	int ret;

	ret = ath12k_wow_arp_ns_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
			    enable, ret);
		return ret;
	}

	ret = ath12k_gtk_rekey_offload(ar, enable);
	if (ret) {
		ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
			    enable, ret);
		return ret;
	}

	return 0;
}

static int ath12k_wow_set_keepalive(struct ath12k *ar,
				    enum wmi_sta_keepalive_method method,
				    u32 interval)
{
	struct ath12k_link_vif *arvif;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
		if (ret)
			return ret;
	}

	return 0;
}
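
/* mac80211 suspend handler: clear stale wakeup state, program wakeup
 * events, protocol offloads, the hw data filter and keepalive, then
 * enable WoW in the firmware and suspend the HIF. Any failure unwinds
 * through the wakeup/cleanup labels and is reported back to mac80211.
 */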
int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
			  struct cfg80211_wowlan *wowlan)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_wow_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_wakeups(ar, wowlan);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_protocol_offload(ar, true);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_mac_wait_tx_complete(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
			    ret);
		goto cleanup;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
	if (ret) {
		ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
		goto cleanup;
	}

	ret = ath12k_wow_enable(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
		goto cleanup;
	}

	ath12k_hif_irq_disable(ar->ab);
	ath12k_hif_ce_irq_disable(ar->ab);

	ret = ath12k_hif_suspend(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
		goto wakeup;
	}

	goto exit;

wakeup:
	ath12k_wow_wakeup(ar);

cleanup:
	ath12k_wow_cleanup(ar);

exit:
	return ret ? 1 : 0;
}

void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);

	lockdep_assert_wiphy(hw->wiphy);

	device_set_wakeup_enable(ar->ab->dev, enabled);
}
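
/* mac80211 resume handler: mirror of ath12k_wow_op_suspend(). Resume the
 * HIF, re-enable interrupts, wake the firmware out of WoW and then undo
 * the net-detect, hw filter, protocol offload and keepalive state that
 * was set up on suspend.
 */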
int ath12k_wow_op_resume(struct ieee80211_hw *hw)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
	int ret;

	lockdep_assert_wiphy(hw->wiphy);

	ret = ath12k_hif_resume(ar->ab);
	if (ret) {
		ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
		goto exit;
	}

	ath12k_hif_ce_irq_enable(ar->ab);
	ath12k_hif_irq_enable(ar->ab);

	ret = ath12k_wow_wakeup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_nlo_cleanup(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_clear_hw_filter(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
		goto exit;
	}

	ret = ath12k_wow_protocol_offload(ar, false);
	if (ret) {
		ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
			    ret);
		goto exit;
	}

	ret = ath12k_wow_set_keepalive(ar,
				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				       WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
		goto exit;
	}

exit:
	if (ret) {
		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;
			ret = 1;
			break;
		case ATH12K_HW_STATE_OFF:
		case ATH12K_HW_STATE_RESTARTING:
		case ATH12K_HW_STATE_RESTARTED:
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
				    ah->state);
			ret = -EIO;
			break;
		}
	}

	return ret;
}
int ath12k_wow_init(struct ath12k *ar)
{
	if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
		return 0;

	ar->wow.wowlan_support = ath12k_wowlan_support;

	if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
		ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
		ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
	}

	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
		ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
		ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
	}

	ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
	ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
	ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;

	device_set_wakeup_capable(ar->ab->dev, true);

	return 0;
}