1 // SPDX-License-Identifier: BSD-3-Clause-Clear
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
5 #include <linux/skbuff.h>
6 #include <linux/ctype.h>
7 #include <net/mac80211.h>
8 #include <net/cfg80211.h>
9 #include <linux/completion.h>
10 #include <linux/if_ether.h>
11 #include <linux/types.h>
12 #include <linux/pci.h>
13 #include <linux/uuid.h>
14 #include <linux/time.h>
22 struct wmi_tlv_policy
{
26 struct wmi_tlv_svc_ready_parse
{
27 bool wmi_svc_bitmap_done
;
30 struct wmi_tlv_svc_rdy_ext_parse
{
31 struct ath11k_service_ext_param param
;
32 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
;
33 struct wmi_hw_mode_capabilities
*hw_mode_caps
;
36 struct wmi_hw_mode_capabilities pref_hw_mode_caps
;
37 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
39 struct wmi_soc_hal_reg_capabilities
*soc_hal_reg_caps
;
40 struct wmi_hal_reg_capabilities_ext
*ext_hal_reg_caps
;
41 u32 n_ext_hal_reg_caps
;
44 bool ext_hal_reg_done
;
47 struct wmi_tlv_rdy_parse
{
48 u32 num_extra_mac_addr
;
51 static const struct wmi_tlv_policy wmi_tlv_policies
[] = {
54 [WMI_TAG_ARRAY_UINT32
]
56 [WMI_TAG_SERVICE_READY_EVENT
]
57 = { .min_len
= sizeof(struct wmi_service_ready_event
) },
58 [WMI_TAG_SERVICE_READY_EXT_EVENT
]
59 = { .min_len
= sizeof(struct wmi_service_ready_ext_event
) },
60 [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS
]
61 = { .min_len
= sizeof(struct wmi_soc_mac_phy_hw_mode_caps
) },
62 [WMI_TAG_SOC_HAL_REG_CAPABILITIES
]
63 = { .min_len
= sizeof(struct wmi_soc_hal_reg_capabilities
) },
64 [WMI_TAG_VDEV_START_RESPONSE_EVENT
]
65 = { .min_len
= sizeof(struct wmi_vdev_start_resp_event
) },
66 [WMI_TAG_PEER_DELETE_RESP_EVENT
]
67 = { .min_len
= sizeof(struct wmi_peer_delete_resp_event
) },
68 [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
]
69 = { .min_len
= sizeof(struct wmi_bcn_tx_status_event
) },
70 [WMI_TAG_VDEV_STOPPED_EVENT
]
71 = { .min_len
= sizeof(struct wmi_vdev_stopped_event
) },
72 [WMI_TAG_REG_CHAN_LIST_CC_EVENT
]
73 = { .min_len
= sizeof(struct wmi_reg_chan_list_cc_event
) },
75 = { .min_len
= sizeof(struct wmi_mgmt_rx_hdr
) },
76 [WMI_TAG_MGMT_TX_COMPL_EVENT
]
77 = { .min_len
= sizeof(struct wmi_mgmt_tx_compl_event
) },
79 = { .min_len
= sizeof(struct wmi_scan_event
) },
80 [WMI_TAG_PEER_STA_KICKOUT_EVENT
]
81 = { .min_len
= sizeof(struct wmi_peer_sta_kickout_event
) },
83 = { .min_len
= sizeof(struct wmi_roam_event
) },
84 [WMI_TAG_CHAN_INFO_EVENT
]
85 = { .min_len
= sizeof(struct wmi_chan_info_event
) },
86 [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
]
87 = { .min_len
= sizeof(struct wmi_pdev_bss_chan_info_event
) },
88 [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
]
89 = { .min_len
= sizeof(struct wmi_vdev_install_key_compl_event
) },
91 = {.min_len
= sizeof(struct wmi_ready_event
) },
92 [WMI_TAG_SERVICE_AVAILABLE_EVENT
]
93 = {.min_len
= sizeof(struct wmi_service_available_event
) },
94 [WMI_TAG_PEER_ASSOC_CONF_EVENT
]
95 = { .min_len
= sizeof(struct wmi_peer_assoc_conf_event
) },
97 = { .min_len
= sizeof(struct wmi_stats_event
) },
98 [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT
]
99 = { .min_len
= sizeof(struct wmi_pdev_ctl_failsafe_chk_event
) },
102 #define PRIMAP(_hw_mode_) \
103 [_hw_mode_] = _hw_mode_##_PRI
105 static const int ath11k_hw_mode_pri_map
[] = {
106 PRIMAP(WMI_HOST_HW_MODE_SINGLE
),
107 PRIMAP(WMI_HOST_HW_MODE_DBS
),
108 PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE
),
109 PRIMAP(WMI_HOST_HW_MODE_SBS
),
110 PRIMAP(WMI_HOST_HW_MODE_DBS_SBS
),
111 PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS
),
113 PRIMAP(WMI_HOST_HW_MODE_MAX
),
117 ath11k_wmi_tlv_iter(struct ath11k_base
*ab
, const void *ptr
, size_t len
,
118 int (*iter
)(struct ath11k_base
*ab
, u16 tag
, u16 len
,
119 const void *ptr
, void *data
),
122 const void *begin
= ptr
;
123 const struct wmi_tlv
*tlv
;
124 u16 tlv_tag
, tlv_len
;
128 if (len
< sizeof(*tlv
)) {
129 ath11k_err(ab
, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
130 ptr
- begin
, len
, sizeof(*tlv
));
135 tlv_tag
= FIELD_GET(WMI_TLV_TAG
, tlv
->header
);
136 tlv_len
= FIELD_GET(WMI_TLV_LEN
, tlv
->header
);
141 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
142 tlv_tag
, ptr
- begin
, len
, tlv_len
);
146 if (tlv_tag
< ARRAY_SIZE(wmi_tlv_policies
) &&
147 wmi_tlv_policies
[tlv_tag
].min_len
&&
148 wmi_tlv_policies
[tlv_tag
].min_len
> tlv_len
) {
149 ath11k_err(ab
, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
150 tlv_tag
, ptr
- begin
, tlv_len
,
151 wmi_tlv_policies
[tlv_tag
].min_len
);
155 ret
= iter(ab
, tlv_tag
, tlv_len
, ptr
, data
);
166 static int ath11k_wmi_tlv_iter_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
167 const void *ptr
, void *data
)
169 const void **tb
= data
;
171 if (tag
< WMI_TAG_MAX
)
177 static int ath11k_wmi_tlv_parse(struct ath11k_base
*ar
, const void **tb
,
178 const void *ptr
, size_t len
)
180 return ath11k_wmi_tlv_iter(ar
, ptr
, len
, ath11k_wmi_tlv_iter_parse
,
185 ath11k_wmi_tlv_parse_alloc(struct ath11k_base
*ab
, const void *ptr
,
186 size_t len
, gfp_t gfp
)
191 tb
= kcalloc(WMI_TAG_MAX
, sizeof(*tb
), gfp
);
193 return ERR_PTR(-ENOMEM
);
195 ret
= ath11k_wmi_tlv_parse(ab
, tb
, ptr
, len
);
204 static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
207 struct ath11k_skb_cb
*skb_cb
= ATH11K_SKB_CB(skb
);
208 struct ath11k_base
*ab
= wmi
->wmi_ab
->ab
;
209 struct wmi_cmd_hdr
*cmd_hdr
;
213 if (skb_push(skb
, sizeof(struct wmi_cmd_hdr
)) == NULL
)
216 cmd
|= FIELD_PREP(WMI_CMD_HDR_CMD_ID
, cmd_id
);
218 cmd_hdr
= (struct wmi_cmd_hdr
*)skb
->data
;
219 cmd_hdr
->cmd_id
= cmd
;
221 memset(skb_cb
, 0, sizeof(*skb_cb
));
222 ret
= ath11k_htc_send(&ab
->htc
, wmi
->eid
, skb
);
230 skb_pull(skb
, sizeof(struct wmi_cmd_hdr
));
234 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi
*wmi
, struct sk_buff
*skb
,
237 struct ath11k_wmi_base
*wmi_sc
= wmi
->wmi_ab
;
238 int ret
= -EOPNOTSUPP
;
242 wait_event_timeout(wmi_sc
->tx_credits_wq
, ({
243 ret
= ath11k_wmi_cmd_send_nowait(wmi
, skb
, cmd_id
);
245 if (ret
&& test_bit(ATH11K_FLAG_CRASH_FLUSH
, &wmi_sc
->ab
->dev_flags
))
249 }), WMI_SEND_TIMEOUT_HZ
);
252 ath11k_warn(wmi_sc
->ab
, "wmi command %d timeout\n", cmd_id
);
257 static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
259 struct ath11k_service_ext_param
*param
)
261 const struct wmi_service_ready_ext_event
*ev
= ptr
;
266 /* Move this to host based bitmap */
267 param
->default_conc_scan_config_bits
= ev
->default_conc_scan_config_bits
;
268 param
->default_fw_config_bits
= ev
->default_fw_config_bits
;
269 param
->he_cap_info
= ev
->he_cap_info
;
270 param
->mpdu_density
= ev
->mpdu_density
;
271 param
->max_bssid_rx_filters
= ev
->max_bssid_rx_filters
;
272 memcpy(¶m
->ppet
, &ev
->ppet
, sizeof(param
->ppet
));
278 ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi
*wmi_handle
,
279 struct wmi_soc_mac_phy_hw_mode_caps
*hw_caps
,
280 struct wmi_hw_mode_capabilities
*wmi_hw_mode_caps
,
281 struct wmi_soc_hal_reg_capabilities
*hal_reg_caps
,
282 struct wmi_mac_phy_capabilities
*wmi_mac_phy_caps
,
283 u8 hw_mode_id
, u8 phy_id
,
284 struct ath11k_pdev
*pdev
)
286 struct wmi_mac_phy_capabilities
*mac_phy_caps
;
287 struct ath11k_band_cap
*cap_band
;
288 struct ath11k_pdev_cap
*pdev_cap
= &pdev
->cap
;
290 u32 hw_idx
, phy_idx
= 0;
292 if (!hw_caps
|| !wmi_hw_mode_caps
|| !hal_reg_caps
)
295 for (hw_idx
= 0; hw_idx
< hw_caps
->num_hw_modes
; hw_idx
++) {
296 if (hw_mode_id
== wmi_hw_mode_caps
[hw_idx
].hw_mode_id
)
299 phy_map
= wmi_hw_mode_caps
[hw_idx
].phy_id_map
;
306 if (hw_idx
== hw_caps
->num_hw_modes
)
310 if (phy_id
>= hal_reg_caps
->num_phy
)
313 mac_phy_caps
= wmi_mac_phy_caps
+ phy_idx
;
315 pdev
->pdev_id
= mac_phy_caps
->pdev_id
;
316 pdev_cap
->supported_bands
= mac_phy_caps
->supported_bands
;
317 pdev_cap
->ampdu_density
= mac_phy_caps
->ampdu_density
;
319 /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
320 * band to band for a single radio, need to see how this should be
323 if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_2G_CAP
) {
324 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_2g
;
325 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_2g
;
326 } else if (mac_phy_caps
->supported_bands
& WMI_HOST_WLAN_5G_CAP
) {
327 pdev_cap
->vht_cap
= mac_phy_caps
->vht_cap_info_5g
;
328 pdev_cap
->vht_mcs
= mac_phy_caps
->vht_supp_mcs_5g
;
329 pdev_cap
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
330 pdev_cap
->tx_chain_mask
= mac_phy_caps
->tx_chain_mask_5g
;
331 pdev_cap
->rx_chain_mask
= mac_phy_caps
->rx_chain_mask_5g
;
336 /* tx/rx chainmask reported from fw depends on the actual hw chains used,
337 * For example, for 4x4 capable macphys, first 4 chains can be used for first
338 * mac and the remaing 4 chains can be used for the second mac or vice-versa.
339 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
340 * will be advertised for second mac or vice-versa. Compute the shift value for
341 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
344 pdev_cap
->tx_chain_mask_shift
=
345 find_first_bit((unsigned long *)&pdev_cap
->tx_chain_mask
, 32);
346 pdev_cap
->rx_chain_mask_shift
=
347 find_first_bit((unsigned long *)&pdev_cap
->rx_chain_mask
, 32);
349 cap_band
= &pdev_cap
->band
[NL80211_BAND_2GHZ
];
350 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_2g
;
351 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_2g
;
352 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_2g
;
353 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_2g_ext
;
354 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_2g
;
355 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_2g
,
356 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
357 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet2g
,
358 sizeof(struct ath11k_ppe_threshold
));
360 cap_band
= &pdev_cap
->band
[NL80211_BAND_5GHZ
];
361 cap_band
->max_bw_supported
= mac_phy_caps
->max_bw_supported_5g
;
362 cap_band
->ht_cap_info
= mac_phy_caps
->ht_cap_info_5g
;
363 cap_band
->he_cap_info
[0] = mac_phy_caps
->he_cap_info_5g
;
364 cap_band
->he_cap_info
[1] = mac_phy_caps
->he_cap_info_5g_ext
;
365 cap_band
->he_mcs
= mac_phy_caps
->he_supp_mcs_5g
;
366 memcpy(cap_band
->he_cap_phy_info
, &mac_phy_caps
->he_cap_phy_info_5g
,
367 sizeof(u32
) * PSOC_HOST_MAX_PHY_SIZE
);
368 memcpy(&cap_band
->he_ppet
, &mac_phy_caps
->he_ppet5g
,
369 sizeof(struct ath11k_ppe_threshold
));
375 ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi
*wmi_handle
,
376 struct wmi_soc_hal_reg_capabilities
*reg_caps
,
377 struct wmi_hal_reg_capabilities_ext
*wmi_ext_reg_cap
,
379 struct ath11k_hal_reg_capabilities_ext
*param
)
381 struct wmi_hal_reg_capabilities_ext
*ext_reg_cap
;
383 if (!reg_caps
|| !wmi_ext_reg_cap
)
386 if (phy_idx
>= reg_caps
->num_phy
)
389 ext_reg_cap
= &wmi_ext_reg_cap
[phy_idx
];
391 param
->phy_id
= ext_reg_cap
->phy_id
;
392 param
->eeprom_reg_domain
= ext_reg_cap
->eeprom_reg_domain
;
393 param
->eeprom_reg_domain_ext
=
394 ext_reg_cap
->eeprom_reg_domain_ext
;
395 param
->regcap1
= ext_reg_cap
->regcap1
;
396 param
->regcap2
= ext_reg_cap
->regcap2
;
397 /* check if param->wireless_mode is needed */
398 param
->low_2ghz_chan
= ext_reg_cap
->low_2ghz_chan
;
399 param
->high_2ghz_chan
= ext_reg_cap
->high_2ghz_chan
;
400 param
->low_5ghz_chan
= ext_reg_cap
->low_5ghz_chan
;
401 param
->high_5ghz_chan
= ext_reg_cap
->high_5ghz_chan
;
406 static int ath11k_pull_service_ready_tlv(struct ath11k_base
*ab
,
408 struct ath11k_targ_cap
*cap
)
410 const struct wmi_service_ready_event
*ev
= evt_buf
;
413 ath11k_err(ab
, "%s: failed by NULL param\n",
418 cap
->phy_capability
= ev
->phy_capability
;
419 cap
->max_frag_entry
= ev
->max_frag_entry
;
420 cap
->num_rf_chains
= ev
->num_rf_chains
;
421 cap
->ht_cap_info
= ev
->ht_cap_info
;
422 cap
->vht_cap_info
= ev
->vht_cap_info
;
423 cap
->vht_supp_mcs
= ev
->vht_supp_mcs
;
424 cap
->hw_min_tx_power
= ev
->hw_min_tx_power
;
425 cap
->hw_max_tx_power
= ev
->hw_max_tx_power
;
426 cap
->sys_cap_info
= ev
->sys_cap_info
;
427 cap
->min_pkt_size_enable
= ev
->min_pkt_size_enable
;
428 cap
->max_bcn_ie_size
= ev
->max_bcn_ie_size
;
429 cap
->max_num_scan_channels
= ev
->max_num_scan_channels
;
430 cap
->max_supported_macs
= ev
->max_supported_macs
;
431 cap
->wmi_fw_sub_feat_caps
= ev
->wmi_fw_sub_feat_caps
;
432 cap
->txrx_chainmask
= ev
->txrx_chainmask
;
433 cap
->default_dbs_hw_mode_index
= ev
->default_dbs_hw_mode_index
;
434 cap
->num_msdu_desc
= ev
->num_msdu_desc
;
439 /* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
440 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
443 static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi
*wmi
,
444 const u32
*wmi_svc_bm
)
448 for (i
= 0, j
= 0; i
< WMI_SERVICE_BM_SIZE
&& j
< WMI_MAX_SERVICE
; i
++) {
450 if (wmi_svc_bm
[i
] & BIT(j
% WMI_SERVICE_BITS_IN_SIZE32
))
451 set_bit(j
, wmi
->wmi_ab
->svc_map
);
452 } while (++j
% WMI_SERVICE_BITS_IN_SIZE32
);
456 static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base
*ab
, u16 tag
, u16 len
,
457 const void *ptr
, void *data
)
459 struct wmi_tlv_svc_ready_parse
*svc_ready
= data
;
460 struct ath11k_pdev_wmi
*wmi_handle
= &ab
->wmi_ab
.wmi
[0];
464 case WMI_TAG_SERVICE_READY_EVENT
:
465 if (ath11k_pull_service_ready_tlv(ab
, ptr
, &ab
->target_caps
))
469 case WMI_TAG_ARRAY_UINT32
:
470 if (!svc_ready
->wmi_svc_bitmap_done
) {
471 expect_len
= WMI_SERVICE_BM_SIZE
* sizeof(u32
);
472 if (len
< expect_len
) {
473 ath11k_warn(ab
, "invalid len %d for the tag 0x%x\n",
478 ath11k_wmi_service_bitmap_copy(wmi_handle
, ptr
);
480 svc_ready
->wmi_svc_bitmap_done
= true;
490 static int ath11k_service_ready_event(struct ath11k_base
*ab
, struct sk_buff
*skb
)
492 struct wmi_tlv_svc_ready_parse svc_ready
= { };
495 ret
= ath11k_wmi_tlv_iter(ab
, skb
->data
, skb
->len
,
496 ath11k_wmi_tlv_svc_rdy_parse
,
499 ath11k_warn(ab
, "failed to parse tlv %d\n", ret
);
506 struct sk_buff
*ath11k_wmi_alloc_skb(struct ath11k_wmi_base
*wmi_sc
, u32 len
)
509 struct ath11k_base
*ab
= wmi_sc
->ab
;
510 u32 round_len
= roundup(len
, 4);
512 skb
= ath11k_htc_alloc_skb(ab
, WMI_SKB_HEADROOM
+ round_len
);
516 skb_reserve(skb
, WMI_SKB_HEADROOM
);
517 if (!IS_ALIGNED((unsigned long)skb
->data
, 4))
518 ath11k_warn(ab
, "unaligned WMI skb data\n");
520 skb_put(skb
, round_len
);
521 memset(skb
->data
, 0, round_len
);
526 int ath11k_wmi_mgmt_send(struct ath11k
*ar
, u32 vdev_id
, u32 buf_id
,
527 struct sk_buff
*frame
)
529 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
530 struct wmi_mgmt_send_cmd
*cmd
;
531 struct wmi_tlv
*frame_tlv
;
536 buf_len
= frame
->len
< WMI_MGMT_SEND_DOWNLD_LEN
?
537 frame
->len
: WMI_MGMT_SEND_DOWNLD_LEN
;
539 len
= sizeof(*cmd
) + sizeof(*frame_tlv
) + roundup(buf_len
, 4);
541 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
545 cmd
= (struct wmi_mgmt_send_cmd
*)skb
->data
;
546 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_MGMT_TX_SEND_CMD
) |
547 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
548 cmd
->vdev_id
= vdev_id
;
549 cmd
->desc_id
= buf_id
;
551 cmd
->paddr_lo
= lower_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
552 cmd
->paddr_hi
= upper_32_bits(ATH11K_SKB_CB(frame
)->paddr
);
553 cmd
->frame_len
= frame
->len
;
554 cmd
->buf_len
= buf_len
;
555 cmd
->tx_params_valid
= 0;
557 frame_tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
558 frame_tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
559 FIELD_PREP(WMI_TLV_LEN
, buf_len
);
561 memcpy(frame_tlv
->value
, frame
->data
, buf_len
);
563 ath11k_ce_byte_swap(frame_tlv
->value
, buf_len
);
565 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_MGMT_TX_SEND_CMDID
);
568 "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
575 int ath11k_wmi_vdev_create(struct ath11k
*ar
, u8
*macaddr
,
576 struct vdev_create_params
*param
)
578 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
579 struct wmi_vdev_create_cmd
*cmd
;
581 struct wmi_vdev_txrx_streams
*txrx_streams
;
586 /* It can be optimized my sending tx/rx chain configuration
587 * only for supported bands instead of always sending it for
590 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+
591 (WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
));
593 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
597 cmd
= (struct wmi_vdev_create_cmd
*)skb
->data
;
598 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_CREATE_CMD
) |
599 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
601 cmd
->vdev_id
= param
->if_id
;
602 cmd
->vdev_type
= param
->type
;
603 cmd
->vdev_subtype
= param
->subtype
;
604 cmd
->num_cfg_txrx_streams
= WMI_NUM_SUPPORTED_BAND_MAX
;
605 cmd
->pdev_id
= param
->pdev_id
;
606 ether_addr_copy(cmd
->vdev_macaddr
.addr
, macaddr
);
608 ptr
= skb
->data
+ sizeof(*cmd
);
609 len
= WMI_NUM_SUPPORTED_BAND_MAX
* sizeof(*txrx_streams
);
612 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
613 FIELD_PREP(WMI_TLV_LEN
, len
);
617 len
= sizeof(*txrx_streams
);
618 txrx_streams
->tlv_header
=
619 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
620 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
621 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_2G
;
622 txrx_streams
->supported_tx_streams
=
623 param
->chains
[NL80211_BAND_2GHZ
].tx
;
624 txrx_streams
->supported_rx_streams
=
625 param
->chains
[NL80211_BAND_2GHZ
].rx
;
628 txrx_streams
->tlv_header
=
629 FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_TXRX_STREAMS
) |
630 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
631 txrx_streams
->band
= WMI_TPC_CHAINMASK_CONFIG_BAND_5G
;
632 txrx_streams
->supported_tx_streams
=
633 param
->chains
[NL80211_BAND_5GHZ
].tx
;
634 txrx_streams
->supported_rx_streams
=
635 param
->chains
[NL80211_BAND_5GHZ
].rx
;
637 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_CREATE_CMDID
);
640 "failed to submit WMI_VDEV_CREATE_CMDID\n");
644 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
645 "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
646 param
->if_id
, param
->type
, param
->subtype
,
647 macaddr
, param
->pdev_id
);
652 int ath11k_wmi_vdev_delete(struct ath11k
*ar
, u8 vdev_id
)
654 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
655 struct wmi_vdev_delete_cmd
*cmd
;
659 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
663 cmd
= (struct wmi_vdev_delete_cmd
*)skb
->data
;
664 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DELETE_CMD
) |
665 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
666 cmd
->vdev_id
= vdev_id
;
668 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DELETE_CMDID
);
670 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DELETE_CMDID\n");
674 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev delete id %d\n", vdev_id
);
679 int ath11k_wmi_vdev_stop(struct ath11k
*ar
, u8 vdev_id
)
681 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
682 struct wmi_vdev_stop_cmd
*cmd
;
686 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
690 cmd
= (struct wmi_vdev_stop_cmd
*)skb
->data
;
692 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_STOP_CMD
) |
693 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
694 cmd
->vdev_id
= vdev_id
;
696 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_STOP_CMDID
);
698 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_STOP cmd\n");
702 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev stop id 0x%x\n", vdev_id
);
707 int ath11k_wmi_vdev_down(struct ath11k
*ar
, u8 vdev_id
)
709 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
710 struct wmi_vdev_down_cmd
*cmd
;
714 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
718 cmd
= (struct wmi_vdev_down_cmd
*)skb
->data
;
720 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_DOWN_CMD
) |
721 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
722 cmd
->vdev_id
= vdev_id
;
724 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_DOWN_CMDID
);
726 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_DOWN cmd\n");
730 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "WMI vdev down id 0x%x\n", vdev_id
);
735 static void ath11k_wmi_put_wmi_channel(struct wmi_channel
*chan
,
736 struct wmi_vdev_start_req_arg
*arg
)
738 memset(chan
, 0, sizeof(*chan
));
740 chan
->mhz
= arg
->channel
.freq
;
741 chan
->band_center_freq1
= arg
->channel
.band_center_freq1
;
742 if (arg
->channel
.mode
== MODE_11AC_VHT80_80
)
743 chan
->band_center_freq2
= arg
->channel
.band_center_freq2
;
745 chan
->band_center_freq2
= 0;
747 chan
->info
|= FIELD_PREP(WMI_CHAN_INFO_MODE
, arg
->channel
.mode
);
748 if (arg
->channel
.passive
)
749 chan
->info
|= WMI_CHAN_INFO_PASSIVE
;
750 if (arg
->channel
.allow_ibss
)
751 chan
->info
|= WMI_CHAN_INFO_ADHOC_ALLOWED
;
752 if (arg
->channel
.allow_ht
)
753 chan
->info
|= WMI_CHAN_INFO_ALLOW_HT
;
754 if (arg
->channel
.allow_vht
)
755 chan
->info
|= WMI_CHAN_INFO_ALLOW_VHT
;
756 if (arg
->channel
.allow_he
)
757 chan
->info
|= WMI_CHAN_INFO_ALLOW_HE
;
758 if (arg
->channel
.ht40plus
)
759 chan
->info
|= WMI_CHAN_INFO_HT40_PLUS
;
760 if (arg
->channel
.chan_radar
)
761 chan
->info
|= WMI_CHAN_INFO_DFS
;
762 if (arg
->channel
.freq2_radar
)
763 chan
->info
|= WMI_CHAN_INFO_DFS_FREQ2
;
765 chan
->reg_info_1
= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR
,
766 arg
->channel
.max_power
) |
767 FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR
,
768 arg
->channel
.max_reg_power
);
770 chan
->reg_info_2
= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX
,
771 arg
->channel
.max_antenna_gain
) |
772 FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR
,
773 arg
->channel
.max_power
);
776 int ath11k_wmi_vdev_start(struct ath11k
*ar
, struct wmi_vdev_start_req_arg
*arg
,
779 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
780 struct wmi_vdev_start_request_cmd
*cmd
;
782 struct wmi_channel
*chan
;
787 if (WARN_ON(arg
->ssid_len
> sizeof(cmd
->ssid
.ssid
)))
790 len
= sizeof(*cmd
) + sizeof(*chan
) + TLV_HDR_SIZE
;
792 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
796 cmd
= (struct wmi_vdev_start_request_cmd
*)skb
->data
;
797 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
798 WMI_TAG_VDEV_START_REQUEST_CMD
) |
799 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
800 cmd
->vdev_id
= arg
->vdev_id
;
801 cmd
->beacon_interval
= arg
->bcn_intval
;
802 cmd
->bcn_tx_rate
= arg
->bcn_tx_rate
;
803 cmd
->dtim_period
= arg
->dtim_period
;
804 cmd
->num_noa_descriptors
= arg
->num_noa_descriptors
;
805 cmd
->preferred_rx_streams
= arg
->pref_rx_streams
;
806 cmd
->preferred_tx_streams
= arg
->pref_tx_streams
;
807 cmd
->cac_duration_ms
= arg
->cac_duration_ms
;
808 cmd
->regdomain
= arg
->regdomain
;
809 cmd
->he_ops
= arg
->he_ops
;
813 cmd
->ssid
.ssid_len
= arg
->ssid_len
;
814 memcpy(cmd
->ssid
.ssid
, arg
->ssid
, arg
->ssid_len
);
816 if (arg
->hidden_ssid
)
817 cmd
->flags
|= WMI_VDEV_START_HIDDEN_SSID
;
818 if (arg
->pmf_enabled
)
819 cmd
->flags
|= WMI_VDEV_START_PMF_ENABLED
;
822 cmd
->flags
|= WMI_VDEV_START_LDPC_RX_ENABLED
;
824 ptr
= skb
->data
+ sizeof(*cmd
);
827 ath11k_wmi_put_wmi_channel(chan
, arg
);
829 chan
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_CHANNEL
) |
830 FIELD_PREP(WMI_TLV_LEN
,
831 sizeof(*chan
) - TLV_HDR_SIZE
);
832 ptr
+= sizeof(*chan
);
835 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
836 FIELD_PREP(WMI_TLV_LEN
, 0);
838 /* Note: This is a nested TLV containing:
839 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
845 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
846 WMI_VDEV_RESTART_REQUEST_CMDID
);
848 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
849 WMI_VDEV_START_REQUEST_CMDID
);
851 ath11k_warn(ar
->ab
, "failed to submit vdev_%s cmd\n",
852 restart
? "restart" : "start");
856 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
857 restart
? "restart" : "start", arg
->vdev_id
,
858 arg
->channel
.freq
, arg
->channel
.mode
);
863 int ath11k_wmi_vdev_up(struct ath11k
*ar
, u32 vdev_id
, u32 aid
, const u8
*bssid
)
865 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
866 struct wmi_vdev_up_cmd
*cmd
;
870 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
874 cmd
= (struct wmi_vdev_up_cmd
*)skb
->data
;
876 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_UP_CMD
) |
877 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
878 cmd
->vdev_id
= vdev_id
;
879 cmd
->vdev_assoc_id
= aid
;
881 ether_addr_copy(cmd
->vdev_bssid
.addr
, bssid
);
883 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_UP_CMDID
);
885 ath11k_warn(ar
->ab
, "failed to submit WMI_VDEV_UP cmd\n");
889 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
890 "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
891 vdev_id
, aid
, bssid
);
896 int ath11k_wmi_send_peer_create_cmd(struct ath11k
*ar
,
897 struct peer_create_params
*param
)
899 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
900 struct wmi_peer_create_cmd
*cmd
;
904 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
908 cmd
= (struct wmi_peer_create_cmd
*)skb
->data
;
909 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_CREATE_CMD
) |
910 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
912 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_addr
);
913 cmd
->peer_type
= param
->peer_type
;
914 cmd
->vdev_id
= param
->vdev_id
;
916 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_CREATE_CMDID
);
918 ath11k_warn(ar
->ab
, "failed to submit WMI_PEER_CREATE cmd\n");
922 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
923 "WMI peer create vdev_id %d peer_addr %pM\n",
924 param
->vdev_id
, param
->peer_addr
);
929 int ath11k_wmi_send_peer_delete_cmd(struct ath11k
*ar
,
930 const u8
*peer_addr
, u8 vdev_id
)
932 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
933 struct wmi_peer_delete_cmd
*cmd
;
937 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
941 cmd
= (struct wmi_peer_delete_cmd
*)skb
->data
;
942 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_DELETE_CMD
) |
943 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
945 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
946 cmd
->vdev_id
= vdev_id
;
948 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
949 "WMI peer delete vdev_id %d peer_addr %pM\n",
952 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_DELETE_CMDID
);
954 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_DELETE cmd\n");
961 int ath11k_wmi_send_pdev_set_regdomain(struct ath11k
*ar
,
962 struct pdev_set_regdomain_params
*param
)
964 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
965 struct wmi_pdev_set_regdomain_cmd
*cmd
;
969 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
973 cmd
= (struct wmi_pdev_set_regdomain_cmd
*)skb
->data
;
974 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
975 WMI_TAG_PDEV_SET_REGDOMAIN_CMD
) |
976 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
978 cmd
->reg_domain
= param
->current_rd_in_use
;
979 cmd
->reg_domain_2g
= param
->current_rd_2g
;
980 cmd
->reg_domain_5g
= param
->current_rd_5g
;
981 cmd
->conformance_test_limit_2g
= param
->ctl_2g
;
982 cmd
->conformance_test_limit_5g
= param
->ctl_5g
;
983 cmd
->dfs_domain
= param
->dfs_domain
;
984 cmd
->pdev_id
= param
->pdev_id
;
986 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
987 "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
988 param
->current_rd_in_use
, param
->current_rd_2g
,
989 param
->current_rd_5g
, param
->dfs_domain
, param
->pdev_id
);
991 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_REGDOMAIN_CMDID
);
994 "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
1001 int ath11k_wmi_set_peer_param(struct ath11k
*ar
, const u8
*peer_addr
,
1002 u32 vdev_id
, u32 param_id
, u32 param_val
)
1004 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1005 struct wmi_peer_set_param_cmd
*cmd
;
1006 struct sk_buff
*skb
;
1009 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1013 cmd
= (struct wmi_peer_set_param_cmd
*)skb
->data
;
1014 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_SET_PARAM_CMD
) |
1015 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1016 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1017 cmd
->vdev_id
= vdev_id
;
1018 cmd
->param_id
= param_id
;
1019 cmd
->param_value
= param_val
;
1021 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_SET_PARAM_CMDID
);
1023 ath11k_warn(ar
->ab
, "failed to send WMI_PEER_SET_PARAM cmd\n");
1027 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1028 "WMI vdev %d peer 0x%pM set param %d value %d\n",
1029 vdev_id
, peer_addr
, param_id
, param_val
);
1034 int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k
*ar
,
1035 u8 peer_addr
[ETH_ALEN
],
1036 struct peer_flush_params
*param
)
1038 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1039 struct wmi_peer_flush_tids_cmd
*cmd
;
1040 struct sk_buff
*skb
;
1043 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1047 cmd
= (struct wmi_peer_flush_tids_cmd
*)skb
->data
;
1048 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PEER_FLUSH_TIDS_CMD
) |
1049 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1051 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1052 cmd
->peer_tid_bitmap
= param
->peer_tid_bitmap
;
1053 cmd
->vdev_id
= param
->vdev_id
;
1055 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_FLUSH_TIDS_CMDID
);
1058 "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
1062 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1063 "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
1064 param
->vdev_id
, peer_addr
, param
->peer_tid_bitmap
);
1069 int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k
*ar
,
1070 int vdev_id
, const u8
*addr
,
1071 dma_addr_t paddr
, u8 tid
,
1072 u8 ba_window_size_valid
,
1075 struct wmi_peer_reorder_queue_setup_cmd
*cmd
;
1076 struct sk_buff
*skb
;
1079 skb
= ath11k_wmi_alloc_skb(ar
->wmi
->wmi_ab
, sizeof(*cmd
));
1083 cmd
= (struct wmi_peer_reorder_queue_setup_cmd
*)skb
->data
;
1084 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1085 WMI_TAG_REORDER_QUEUE_SETUP_CMD
) |
1086 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1088 ether_addr_copy(cmd
->peer_macaddr
.addr
, addr
);
1089 cmd
->vdev_id
= vdev_id
;
1091 cmd
->queue_ptr_lo
= lower_32_bits(paddr
);
1092 cmd
->queue_ptr_hi
= upper_32_bits(paddr
);
1093 cmd
->queue_no
= tid
;
1094 cmd
->ba_window_size_valid
= ba_window_size_valid
;
1095 cmd
->ba_window_size
= ba_window_size
;
1097 ret
= ath11k_wmi_cmd_send(ar
->wmi
, skb
,
1098 WMI_PEER_REORDER_QUEUE_SETUP_CMDID
);
1101 "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
1105 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1106 "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
1107 addr
, vdev_id
, tid
);
1113 ath11k_wmi_rx_reord_queue_remove(struct ath11k
*ar
,
1114 struct rx_reorder_queue_remove_params
*param
)
1116 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1117 struct wmi_peer_reorder_queue_remove_cmd
*cmd
;
1118 struct sk_buff
*skb
;
1121 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1125 cmd
= (struct wmi_peer_reorder_queue_remove_cmd
*)skb
->data
;
1126 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1127 WMI_TAG_REORDER_QUEUE_REMOVE_CMD
) |
1128 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1130 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_macaddr
);
1131 cmd
->vdev_id
= param
->vdev_id
;
1132 cmd
->tid_mask
= param
->peer_tid_bitmap
;
1134 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1135 "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__
,
1136 param
->peer_macaddr
, param
->vdev_id
, param
->peer_tid_bitmap
);
1138 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1139 WMI_PEER_REORDER_QUEUE_REMOVE_CMDID
);
1142 "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
1149 int ath11k_wmi_pdev_set_param(struct ath11k
*ar
, u32 param_id
,
1150 u32 param_value
, u8 pdev_id
)
1152 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1153 struct wmi_pdev_set_param_cmd
*cmd
;
1154 struct sk_buff
*skb
;
1157 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1161 cmd
= (struct wmi_pdev_set_param_cmd
*)skb
->data
;
1162 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SET_PARAM_CMD
) |
1163 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1164 cmd
->pdev_id
= pdev_id
;
1165 cmd
->param_id
= param_id
;
1166 cmd
->param_value
= param_value
;
1168 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SET_PARAM_CMDID
);
1170 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1174 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1175 "WMI pdev set param %d pdev id %d value %d\n",
1176 param_id
, pdev_id
, param_value
);
1181 int ath11k_wmi_pdev_set_ps_mode(struct ath11k
*ar
, int vdev_id
, u32 enable
)
1183 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1184 struct wmi_pdev_set_ps_mode_cmd
*cmd
;
1185 struct sk_buff
*skb
;
1188 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1192 cmd
= (struct wmi_pdev_set_ps_mode_cmd
*)skb
->data
;
1193 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_STA_POWERSAVE_MODE_CMD
) |
1194 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1195 cmd
->vdev_id
= vdev_id
;
1196 cmd
->sta_ps_mode
= enable
;
1198 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_MODE_CMDID
);
1200 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SET_PARAM cmd\n");
1204 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1205 "WMI vdev set psmode %d vdev id %d\n",
1211 int ath11k_wmi_pdev_suspend(struct ath11k
*ar
, u32 suspend_opt
,
1214 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1215 struct wmi_pdev_suspend_cmd
*cmd
;
1216 struct sk_buff
*skb
;
1219 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1223 cmd
= (struct wmi_pdev_suspend_cmd
*)skb
->data
;
1225 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_SUSPEND_CMD
) |
1226 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1228 cmd
->suspend_opt
= suspend_opt
;
1229 cmd
->pdev_id
= pdev_id
;
1231 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_SUSPEND_CMDID
);
1233 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_SUSPEND cmd\n");
1237 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1238 "WMI pdev suspend pdev_id %d\n", pdev_id
);
1243 int ath11k_wmi_pdev_resume(struct ath11k
*ar
, u32 pdev_id
)
1245 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1246 struct wmi_pdev_resume_cmd
*cmd
;
1247 struct sk_buff
*skb
;
1250 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1254 cmd
= (struct wmi_pdev_resume_cmd
*)skb
->data
;
1256 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_PDEV_RESUME_CMD
) |
1257 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1258 cmd
->pdev_id
= pdev_id
;
1260 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1261 "WMI pdev resume pdev id %d\n", pdev_id
);
1263 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PDEV_RESUME_CMDID
);
1265 ath11k_warn(ar
->ab
, "failed to send WMI_PDEV_RESUME cmd\n");
1272 /* TODO FW Support for the cmd is not available yet.
1273 * Can be tested once the command and corresponding
1274 * event is implemented in FW
1276 int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k
*ar
,
1277 enum wmi_bss_chan_info_req_type type
)
1279 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1280 struct wmi_pdev_bss_chan_info_req_cmd
*cmd
;
1281 struct sk_buff
*skb
;
1284 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1288 cmd
= (struct wmi_pdev_bss_chan_info_req_cmd
*)skb
->data
;
1290 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1291 WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST
) |
1292 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1293 cmd
->req_type
= type
;
1295 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1296 "WMI bss chan info req type %d\n", type
);
1298 ret
= ath11k_wmi_cmd_send(wmi
, skb
,
1299 WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID
);
1302 "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
1309 int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k
*ar
, u8
*peer_addr
,
1310 struct ap_ps_params
*param
)
1312 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1313 struct wmi_ap_ps_peer_cmd
*cmd
;
1314 struct sk_buff
*skb
;
1317 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1321 cmd
= (struct wmi_ap_ps_peer_cmd
*)skb
->data
;
1322 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_AP_PS_PEER_CMD
) |
1323 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1325 cmd
->vdev_id
= param
->vdev_id
;
1326 ether_addr_copy(cmd
->peer_macaddr
.addr
, peer_addr
);
1327 cmd
->param
= param
->param
;
1328 cmd
->value
= param
->value
;
1330 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_AP_PS_PEER_PARAM_CMDID
);
1333 "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
1337 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1338 "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
1339 param
->vdev_id
, peer_addr
, param
->param
, param
->value
);
1344 int ath11k_wmi_set_sta_ps_param(struct ath11k
*ar
, u32 vdev_id
,
1345 u32 param
, u32 param_value
)
1347 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1348 struct wmi_sta_powersave_param_cmd
*cmd
;
1349 struct sk_buff
*skb
;
1352 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1356 cmd
= (struct wmi_sta_powersave_param_cmd
*)skb
->data
;
1357 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1358 WMI_TAG_STA_POWERSAVE_PARAM_CMD
) |
1359 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1361 cmd
->vdev_id
= vdev_id
;
1363 cmd
->value
= param_value
;
1365 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1366 "WMI set sta ps vdev_id %d param %d value %d\n",
1367 vdev_id
, param
, param_value
);
1369 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_STA_POWERSAVE_PARAM_CMDID
);
1371 ath11k_warn(ar
->ab
, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
1378 int ath11k_wmi_force_fw_hang_cmd(struct ath11k
*ar
, u32 type
, u32 delay_time_ms
)
1380 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1381 struct wmi_force_fw_hang_cmd
*cmd
;
1382 struct sk_buff
*skb
;
1387 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1391 cmd
= (struct wmi_force_fw_hang_cmd
*)skb
->data
;
1392 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_FORCE_FW_HANG_CMD
) |
1393 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1396 cmd
->delay_time_ms
= delay_time_ms
;
1398 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_FORCE_FW_HANG_CMDID
);
1401 ath11k_warn(ar
->ab
, "Failed to send WMI_FORCE_FW_HANG_CMDID");
1407 int ath11k_wmi_vdev_set_param_cmd(struct ath11k
*ar
, u32 vdev_id
,
1408 u32 param_id
, u32 param_value
)
1410 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1411 struct wmi_vdev_set_param_cmd
*cmd
;
1412 struct sk_buff
*skb
;
1415 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1419 cmd
= (struct wmi_vdev_set_param_cmd
*)skb
->data
;
1420 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_SET_PARAM_CMD
) |
1421 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1423 cmd
->vdev_id
= vdev_id
;
1424 cmd
->param_id
= param_id
;
1425 cmd
->param_value
= param_value
;
1427 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_SET_PARAM_CMDID
);
1430 "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
1434 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1435 "WMI vdev id 0x%x set param %d value %d\n",
1436 vdev_id
, param_id
, param_value
);
1441 int ath11k_wmi_send_stats_request_cmd(struct ath11k
*ar
,
1442 struct stats_request_params
*param
)
1444 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1445 struct wmi_request_stats_cmd
*cmd
;
1446 struct sk_buff
*skb
;
1449 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1453 cmd
= (struct wmi_request_stats_cmd
*)skb
->data
;
1454 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_REQUEST_STATS_CMD
) |
1455 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1457 cmd
->stats_id
= param
->stats_id
;
1458 cmd
->vdev_id
= param
->vdev_id
;
1459 cmd
->pdev_id
= param
->pdev_id
;
1461 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_REQUEST_STATS_CMDID
);
1463 ath11k_warn(ar
->ab
, "failed to send WMI_REQUEST_STATS cmd\n");
1467 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1468 "WMI request stats 0x%x vdev id %d pdev id %d\n",
1469 param
->stats_id
, param
->vdev_id
, param
->pdev_id
);
1474 int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k
*ar
,
1475 u32 vdev_id
, u32 bcn_ctrl_op
)
1477 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1478 struct wmi_bcn_offload_ctrl_cmd
*cmd
;
1479 struct sk_buff
*skb
;
1482 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, sizeof(*cmd
));
1486 cmd
= (struct wmi_bcn_offload_ctrl_cmd
*)skb
->data
;
1487 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1488 WMI_TAG_BCN_OFFLOAD_CTRL_CMD
) |
1489 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1491 cmd
->vdev_id
= vdev_id
;
1492 cmd
->bcn_ctrl_op
= bcn_ctrl_op
;
1494 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1495 "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
1496 vdev_id
, bcn_ctrl_op
);
1498 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_OFFLOAD_CTRL_CMDID
);
1501 "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
1508 int ath11k_wmi_bcn_tmpl(struct ath11k
*ar
, u32 vdev_id
,
1509 struct ieee80211_mutable_offsets
*offs
,
1510 struct sk_buff
*bcn
)
1512 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1513 struct wmi_bcn_tmpl_cmd
*cmd
;
1514 struct wmi_bcn_prb_info
*bcn_prb_info
;
1515 struct wmi_tlv
*tlv
;
1516 struct sk_buff
*skb
;
1519 size_t aligned_len
= roundup(bcn
->len
, 4);
1521 len
= sizeof(*cmd
) + sizeof(*bcn_prb_info
) + TLV_HDR_SIZE
+ aligned_len
;
1523 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1527 cmd
= (struct wmi_bcn_tmpl_cmd
*)skb
->data
;
1528 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_BCN_TMPL_CMD
) |
1529 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1530 cmd
->vdev_id
= vdev_id
;
1531 cmd
->tim_ie_offset
= offs
->tim_offset
;
1532 cmd
->csa_switch_count_offset
= offs
->csa_counter_offs
[0];
1533 cmd
->ext_csa_switch_count_offset
= offs
->csa_counter_offs
[1];
1534 cmd
->buf_len
= bcn
->len
;
1536 ptr
= skb
->data
+ sizeof(*cmd
);
1539 len
= sizeof(*bcn_prb_info
);
1540 bcn_prb_info
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1541 WMI_TAG_BCN_PRB_INFO
) |
1542 FIELD_PREP(WMI_TLV_LEN
, len
- TLV_HDR_SIZE
);
1543 bcn_prb_info
->caps
= 0;
1544 bcn_prb_info
->erp
= 0;
1546 ptr
+= sizeof(*bcn_prb_info
);
1549 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1550 FIELD_PREP(WMI_TLV_LEN
, aligned_len
);
1551 memcpy(tlv
->value
, bcn
->data
, bcn
->len
);
1553 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_BCN_TMPL_CMDID
);
1555 ath11k_warn(ar
->ab
, "failed to send WMI_BCN_TMPL_CMDID\n");
1562 int ath11k_wmi_vdev_install_key(struct ath11k
*ar
,
1563 struct wmi_vdev_install_key_arg
*arg
)
1565 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1566 struct wmi_vdev_install_key_cmd
*cmd
;
1567 struct wmi_tlv
*tlv
;
1568 struct sk_buff
*skb
;
1570 int key_len_aligned
= roundup(arg
->key_len
, sizeof(uint32_t));
1572 len
= sizeof(*cmd
) + TLV_HDR_SIZE
+ key_len_aligned
;
1574 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1578 cmd
= (struct wmi_vdev_install_key_cmd
*)skb
->data
;
1579 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VDEV_INSTALL_KEY_CMD
) |
1580 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1581 cmd
->vdev_id
= arg
->vdev_id
;
1582 ether_addr_copy(cmd
->peer_macaddr
.addr
, arg
->macaddr
);
1583 cmd
->key_idx
= arg
->key_idx
;
1584 cmd
->key_flags
= arg
->key_flags
;
1585 cmd
->key_cipher
= arg
->key_cipher
;
1586 cmd
->key_len
= arg
->key_len
;
1587 cmd
->key_txmic_len
= arg
->key_txmic_len
;
1588 cmd
->key_rxmic_len
= arg
->key_rxmic_len
;
1590 if (arg
->key_rsc_counter
)
1591 memcpy(&cmd
->key_rsc_counter
, &arg
->key_rsc_counter
,
1592 sizeof(struct wmi_key_seq_counter
));
1594 tlv
= (struct wmi_tlv
*)(skb
->data
+ sizeof(*cmd
));
1595 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1596 FIELD_PREP(WMI_TLV_LEN
, key_len_aligned
);
1597 memcpy(tlv
->value
, (u8
*)arg
->key_data
, key_len_aligned
);
1599 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_VDEV_INSTALL_KEY_CMDID
);
1602 "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
1606 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1607 "WMI vdev install key idx %d cipher %d len %d\n",
1608 arg
->key_idx
, arg
->key_cipher
, arg
->key_len
);
1614 ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd
*cmd
,
1615 struct peer_assoc_params
*param
)
1617 cmd
->peer_flags
= 0;
1619 if (param
->is_wme_set
) {
1620 if (param
->qos_flag
)
1621 cmd
->peer_flags
|= WMI_PEER_QOS
;
1622 if (param
->apsd_flag
)
1623 cmd
->peer_flags
|= WMI_PEER_APSD
;
1625 cmd
->peer_flags
|= WMI_PEER_HT
;
1627 cmd
->peer_flags
|= WMI_PEER_40MHZ
;
1629 cmd
->peer_flags
|= WMI_PEER_80MHZ
;
1631 cmd
->peer_flags
|= WMI_PEER_160MHZ
;
1633 /* Typically if STBC is enabled for VHT it should be enabled
1636 if (param
->stbc_flag
)
1637 cmd
->peer_flags
|= WMI_PEER_STBC
;
1639 /* Typically if LDPC is enabled for VHT it should be enabled
1642 if (param
->ldpc_flag
)
1643 cmd
->peer_flags
|= WMI_PEER_LDPC
;
1645 if (param
->static_mimops_flag
)
1646 cmd
->peer_flags
|= WMI_PEER_STATIC_MIMOPS
;
1647 if (param
->dynamic_mimops_flag
)
1648 cmd
->peer_flags
|= WMI_PEER_DYN_MIMOPS
;
1649 if (param
->spatial_mux_flag
)
1650 cmd
->peer_flags
|= WMI_PEER_SPATIAL_MUX
;
1651 if (param
->vht_flag
)
1652 cmd
->peer_flags
|= WMI_PEER_VHT
;
1654 cmd
->peer_flags
|= WMI_PEER_HE
;
1655 if (param
->twt_requester
)
1656 cmd
->peer_flags
|= WMI_PEER_TWT_REQ
;
1657 if (param
->twt_responder
)
1658 cmd
->peer_flags
|= WMI_PEER_TWT_RESP
;
1661 /* Suppress authorization for all AUTH modes that need 4-way handshake
1662 * (during re-association).
1663 * Authorization will be done for these modes on key installation.
1665 if (param
->auth_flag
)
1666 cmd
->peer_flags
|= WMI_PEER_AUTH
;
1667 if (param
->need_ptk_4_way
)
1668 cmd
->peer_flags
|= WMI_PEER_NEED_PTK_4_WAY
;
1670 cmd
->peer_flags
&= ~WMI_PEER_NEED_PTK_4_WAY
;
1671 if (param
->need_gtk_2_way
)
1672 cmd
->peer_flags
|= WMI_PEER_NEED_GTK_2_WAY
;
1673 /* safe mode bypass the 4-way handshake */
1674 if (param
->safe_mode_enabled
)
1675 cmd
->peer_flags
&= ~(WMI_PEER_NEED_PTK_4_WAY
|
1676 WMI_PEER_NEED_GTK_2_WAY
);
1678 if (param
->is_pmf_enabled
)
1679 cmd
->peer_flags
|= WMI_PEER_PMF
;
1681 /* Disable AMSDU for station transmit, if user configures it */
1682 /* Disable AMSDU for AP transmit to 11n Stations, if user configures
1684 * if (param->amsdu_disable) Add after FW support
1687 /* Target asserts if node is marked HT and all MCS is set to 0.
1688 * Mark the node as non-HT if all the mcs rates are disabled through
1691 if (param
->peer_ht_rates
.num_rates
== 0)
1692 cmd
->peer_flags
&= ~WMI_PEER_HT
;
1695 int ath11k_wmi_send_peer_assoc_cmd(struct ath11k
*ar
,
1696 struct peer_assoc_params
*param
)
1698 struct ath11k_pdev_wmi
*wmi
= ar
->wmi
;
1699 struct wmi_peer_assoc_complete_cmd
*cmd
;
1700 struct wmi_vht_rate_set
*mcs
;
1701 struct wmi_he_rate_set
*he_mcs
;
1702 struct sk_buff
*skb
;
1703 struct wmi_tlv
*tlv
;
1705 u32 peer_legacy_rates_align
;
1706 u32 peer_ht_rates_align
;
1709 peer_legacy_rates_align
= roundup(param
->peer_legacy_rates
.num_rates
,
1711 peer_ht_rates_align
= roundup(param
->peer_ht_rates
.num_rates
,
1714 len
= sizeof(*cmd
) +
1715 TLV_HDR_SIZE
+ (peer_legacy_rates_align
* sizeof(u8
)) +
1716 TLV_HDR_SIZE
+ (peer_ht_rates_align
* sizeof(u8
)) +
1717 sizeof(*mcs
) + TLV_HDR_SIZE
+
1718 (sizeof(*he_mcs
) * param
->peer_he_mcs_count
);
1720 skb
= ath11k_wmi_alloc_skb(wmi
->wmi_ab
, len
);
1727 cmd
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1728 WMI_TAG_PEER_ASSOC_COMPLETE_CMD
) |
1729 FIELD_PREP(WMI_TLV_LEN
, sizeof(*cmd
) - TLV_HDR_SIZE
);
1731 cmd
->vdev_id
= param
->vdev_id
;
1733 cmd
->peer_new_assoc
= param
->peer_new_assoc
;
1734 cmd
->peer_associd
= param
->peer_associd
;
1736 ath11k_wmi_copy_peer_flags(cmd
, param
);
1738 ether_addr_copy(cmd
->peer_macaddr
.addr
, param
->peer_mac
);
1740 cmd
->peer_rate_caps
= param
->peer_rate_caps
;
1741 cmd
->peer_caps
= param
->peer_caps
;
1742 cmd
->peer_listen_intval
= param
->peer_listen_intval
;
1743 cmd
->peer_ht_caps
= param
->peer_ht_caps
;
1744 cmd
->peer_max_mpdu
= param
->peer_max_mpdu
;
1745 cmd
->peer_mpdu_density
= param
->peer_mpdu_density
;
1746 cmd
->peer_vht_caps
= param
->peer_vht_caps
;
1747 cmd
->peer_phymode
= param
->peer_phymode
;
1749 /* Update 11ax capabilities */
1750 cmd
->peer_he_cap_info
= param
->peer_he_cap_macinfo
[0];
1751 cmd
->peer_he_cap_info_ext
= param
->peer_he_cap_macinfo
[1];
1752 cmd
->peer_he_cap_info_internal
= param
->peer_he_cap_macinfo_internal
;
1753 cmd
->peer_he_ops
= param
->peer_he_ops
;
1754 memcpy(&cmd
->peer_he_cap_phy
, ¶m
->peer_he_cap_phyinfo
,
1755 sizeof(param
->peer_he_cap_phyinfo
));
1756 memcpy(&cmd
->peer_ppet
, ¶m
->peer_ppet
,
1757 sizeof(param
->peer_ppet
));
1759 /* Update peer legacy rate information */
1760 ptr
+= sizeof(*cmd
);
1763 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1764 FIELD_PREP(WMI_TLV_LEN
, peer_legacy_rates_align
);
1766 ptr
+= TLV_HDR_SIZE
;
1768 cmd
->num_peer_legacy_rates
= param
->peer_legacy_rates
.num_rates
;
1769 memcpy(ptr
, param
->peer_legacy_rates
.rates
,
1770 param
->peer_legacy_rates
.num_rates
);
1772 /* Update peer HT rate information */
1773 ptr
+= peer_legacy_rates_align
;
1776 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_BYTE
) |
1777 FIELD_PREP(WMI_TLV_LEN
, peer_ht_rates_align
);
1778 ptr
+= TLV_HDR_SIZE
;
1779 cmd
->num_peer_ht_rates
= param
->peer_ht_rates
.num_rates
;
1780 memcpy(ptr
, param
->peer_ht_rates
.rates
,
1781 param
->peer_ht_rates
.num_rates
);
1784 ptr
+= peer_ht_rates_align
;
1788 mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_VHT_RATE_SET
) |
1789 FIELD_PREP(WMI_TLV_LEN
, sizeof(*mcs
) - TLV_HDR_SIZE
);
1791 cmd
->peer_nss
= param
->peer_nss
;
1793 /* Update bandwidth-NSS mapping */
1794 cmd
->peer_bw_rxnss_override
= 0;
1795 cmd
->peer_bw_rxnss_override
|= param
->peer_bw_rxnss_override
;
1797 if (param
->vht_capable
) {
1798 mcs
->rx_max_rate
= param
->rx_max_rate
;
1799 mcs
->rx_mcs_set
= param
->rx_mcs_set
;
1800 mcs
->tx_max_rate
= param
->tx_max_rate
;
1801 mcs
->tx_mcs_set
= param
->tx_mcs_set
;
1805 cmd
->peer_he_mcs
= param
->peer_he_mcs_count
;
1807 ptr
+= sizeof(*mcs
);
1809 len
= param
->peer_he_mcs_count
* sizeof(*he_mcs
);
1812 tlv
->header
= FIELD_PREP(WMI_TLV_TAG
, WMI_TAG_ARRAY_STRUCT
) |
1813 FIELD_PREP(WMI_TLV_LEN
, len
);
1814 ptr
+= TLV_HDR_SIZE
;
1816 /* Loop through the HE rate set */
1817 for (i
= 0; i
< param
->peer_he_mcs_count
; i
++) {
1819 he_mcs
->tlv_header
= FIELD_PREP(WMI_TLV_TAG
,
1820 WMI_TAG_HE_RATE_SET
) |
1821 FIELD_PREP(WMI_TLV_LEN
,
1822 sizeof(*he_mcs
) - TLV_HDR_SIZE
);
1824 he_mcs
->rx_mcs_set
= param
->peer_he_rx_mcs_set
[i
];
1825 he_mcs
->tx_mcs_set
= param
->peer_he_tx_mcs_set
[i
];
1826 ptr
+= sizeof(*he_mcs
);
1829 ret
= ath11k_wmi_cmd_send(wmi
, skb
, WMI_PEER_ASSOC_CMDID
);
1832 "failed to send WMI_PEER_ASSOC_CMDID\n");
1836 ath11k_dbg(ar
->ab
, ATH11K_DBG_WMI
,
1837 "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
1838 cmd
->vdev_id
, cmd
->peer_associd
, param
->peer_mac
,
1839 cmd
->peer_flags
, cmd
->peer_rate_caps
, cmd
->peer_caps
,
1840 cmd
->peer_listen_intval
, cmd
->peer_ht_caps
,
1841 cmd
->peer_max_mpdu
, cmd
->peer_nss
, cmd
->peer_phymode
,
1842 cmd
->peer_mpdu_density
,
1843 cmd
->peer_vht_caps
, cmd
->peer_he_cap_info
,
1844 cmd
->peer_he_ops
, cmd
->peer_he_cap_info_ext
,
1845 cmd
->peer_he_cap_phy
[0], cmd
->peer_he_cap_phy
[1],
1846 cmd
->peer_he_cap_phy
[2],
1847 cmd
->peer_bw_rxnss_override
);
1852 void ath11k_wmi_start_scan_init(struct ath11k
*ar
,
1853 struct scan_req_params
*arg
)
1855 /* setup commonly used values */
1856 arg
->scan_req_id
= 1;
1857 arg
->scan_priority
= WMI_SCAN_PRIORITY_LOW
;
1858 arg
->dwell_time_active
= 50;
1859 arg
->dwell_time_active_2g
= 0;
1860 arg
->dwell_time_passive
= 150;
1861 arg
->min_rest_time
= 50;
1862 arg
->max_rest_time
= 500;
1863 arg
->repeat_probe_time
= 0;
1864 arg
->probe_spacing_time
= 0;
1866 arg
->max_scan_time
= 20000;
1867 arg
->probe_delay
= 5;
1868 arg
->notify_scan_events
= WMI_SCAN_EVENT_STARTED
|
1869 WMI_SCAN_EVENT_COMPLETED
|
1870 WMI_SCAN_EVENT_BSS_CHANNEL
|
1871 WMI_SCAN_EVENT_FOREIGN_CHAN
|
1872 WMI_SCAN_EVENT_DEQUEUED
;
1873 arg
->scan_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1878 ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd
*cmd
,
1879 struct scan_req_params
*param
)
1881 /* Scan events subscription */
1882 if (param
->scan_ev_started
)
1883 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_STARTED
;
1884 if (param
->scan_ev_completed
)
1885 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_COMPLETED
;
1886 if (param
->scan_ev_bss_chan
)
1887 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_BSS_CHANNEL
;
1888 if (param
->scan_ev_foreign_chan
)
1889 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN
;
1890 if (param
->scan_ev_dequeued
)
1891 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_DEQUEUED
;
1892 if (param
->scan_ev_preempted
)
1893 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_PREEMPTED
;
1894 if (param
->scan_ev_start_failed
)
1895 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_START_FAILED
;
1896 if (param
->scan_ev_restarted
)
1897 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESTARTED
;
1898 if (param
->scan_ev_foreign_chn_exit
)
1899 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
;
1900 if (param
->scan_ev_suspended
)
1901 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_SUSPENDED
;
1902 if (param
->scan_ev_resumed
)
1903 cmd
->notify_scan_events
|= WMI_SCAN_EVENT_RESUMED
;
1905 /** Set scan control flags */
1906 cmd
->scan_ctrl_flags
= 0;
1907 if (param
->scan_f_passive
)
1908 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_PASSIVE
;
1909 if (param
->scan_f_strict_passive_pch
)
1910 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN
;
1911 if (param
->scan_f_promisc_mode
)
1912 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROMISCUOS
;
1913 if (param
->scan_f_capture_phy_err
)
1914 cmd
->scan_ctrl_flags
|= WMI_SCAN_CAPTURE_PHY_ERROR
;
1915 if (param
->scan_f_half_rate
)
1916 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_HALF_RATE_SUPPORT
;
1917 if (param
->scan_f_quarter_rate
)
1918 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT
;
1919 if (param
->scan_f_cck_rates
)
1920 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_CCK_RATES
;
1921 if (param
->scan_f_ofdm_rates
)
1922 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_OFDM_RATES
;
1923 if (param
->scan_f_chan_stat_evnt
)
1924 cmd
->scan_ctrl_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1925 if (param
->scan_f_filter_prb_req
)
1926 cmd
->scan_ctrl_flags
|= WMI_SCAN_FILTER_PROBE_REQ
;
1927 if (param
->scan_f_bcast_probe
)
1928 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_BCAST_PROBE_REQ
;
1929 if (param
->scan_f_offchan_mgmt_tx
)
1930 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_MGMT_TX
;
1931 if (param
->scan_f_offchan_data_tx
)
1932 cmd
->scan_ctrl_flags
|= WMI_SCAN_OFFCHAN_DATA_TX
;
1933 if (param
->scan_f_force_active_dfs_chn
)
1934 cmd
->scan_ctrl_flags
|= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS
;
1935 if (param
->scan_f_add_tpc_ie_in_probe
)
1936 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ
;
1937 if (param
->scan_f_add_ds_ie_in_probe
)
1938 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ
;
1939 if (param
->scan_f_add_spoofed_mac_in_probe
)
1940 cmd
->scan_ctrl_flags
|= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ
;
1941 if (param
->scan_f_add_rand_seq_in_probe
)
1942 cmd
->scan_ctrl_flags
|= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ
;
1943 if (param
->scan_f_en_ie_whitelist_in_probe
)
1944 cmd
->scan_ctrl_flags
|=
1945 WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ
;
1947 /* for adaptive scan mode using 3 bits (21 - 23 bits) */
1948 WMI_SCAN_SET_DWELL_MODE(cmd
->scan_ctrl_flags
,
1949 param
->adaptive_dwell_time_mode
);
int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
                                   struct scan_req_params *params)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_start_scan_cmd *cmd;
    struct wmi_ssid *ssid = NULL;
    struct wmi_mac_addr *bssid;
    struct sk_buff *skb;
    struct wmi_tlv *tlv;
    void *ptr;
    int i, ret, len;
    u32 *tmp_ptr;
    u8 extraie_len_with_pad = 0;

    len = sizeof(*cmd);

    len += TLV_HDR_SIZE;
    if (params->num_chan)
        len += params->num_chan * sizeof(u32);

    len += TLV_HDR_SIZE;
    if (params->num_ssids)
        len += params->num_ssids * sizeof(*ssid);

    len += TLV_HDR_SIZE;
    if (params->num_bssid)
        len += sizeof(*bssid) * params->num_bssid;

    len += TLV_HDR_SIZE;
    if (params->extraie.len)
        extraie_len_with_pad =
            roundup(params->extraie.len, sizeof(u32));
    len += extraie_len_with_pad;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    ptr = skb->data;

    cmd = ptr;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->scan_id = params->scan_id;
    cmd->scan_req_id = params->scan_req_id;
    cmd->vdev_id = params->vdev_id;
    cmd->scan_priority = params->scan_priority;
    cmd->notify_scan_events = params->notify_scan_events;

    ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);

    cmd->dwell_time_active = params->dwell_time_active;
    cmd->dwell_time_active_2g = params->dwell_time_active_2g;
    cmd->dwell_time_passive = params->dwell_time_passive;
    cmd->min_rest_time = params->min_rest_time;
    cmd->max_rest_time = params->max_rest_time;
    cmd->repeat_probe_time = params->repeat_probe_time;
    cmd->probe_spacing_time = params->probe_spacing_time;
    cmd->idle_time = params->idle_time;
    cmd->max_scan_time = params->max_scan_time;
    cmd->probe_delay = params->probe_delay;
    cmd->burst_duration = params->burst_duration;
    cmd->num_chan = params->num_chan;
    cmd->num_bssid = params->num_bssid;
    cmd->num_ssids = params->num_ssids;
    cmd->ie_len = params->extraie.len;
    cmd->n_probes = params->n_probes;

    ptr += sizeof(*cmd);

    len = params->num_chan * sizeof(u32);

    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
                  FIELD_PREP(WMI_TLV_LEN, len);
    ptr += TLV_HDR_SIZE;
    tmp_ptr = (u32 *)ptr;

    for (i = 0; i < params->num_chan; ++i)
        tmp_ptr[i] = params->chan_list[i];

    ptr += len;

    len = params->num_ssids * sizeof(*ssid);
    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
                  FIELD_PREP(WMI_TLV_LEN, len);

    ptr += TLV_HDR_SIZE;

    if (params->num_ssids) {
        ssid = ptr;
        for (i = 0; i < params->num_ssids; ++i) {
            ssid->ssid_len = params->ssid[i].length;
            memcpy(ssid->ssid, params->ssid[i].ssid,
                   params->ssid[i].length);
            ssid++;
        }
    }

    ptr += (params->num_ssids * sizeof(*ssid));
    len = params->num_bssid * sizeof(*bssid);
    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
                  FIELD_PREP(WMI_TLV_LEN, len);

    ptr += TLV_HDR_SIZE;
    bssid = ptr;

    if (params->num_bssid) {
        for (i = 0; i < params->num_bssid; ++i) {
            ether_addr_copy(bssid->addr,
                            params->bssid_list[i].addr);
            bssid++;
        }
    }

    ptr += params->num_bssid * sizeof(*bssid);

    len = extraie_len_with_pad;
    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
                  FIELD_PREP(WMI_TLV_LEN, len);
    ptr += TLV_HDR_SIZE;

    if (params->extraie.len)
        memcpy(ptr, params->extraie.ptr,
               params->extraie.len);

    ptr += extraie_len_with_pad;

    ret = ath11k_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
                                  struct scan_cancel_param *param)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_stop_scan_cmd *cmd;
    struct sk_buff *skb;
    int ret;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_stop_scan_cmd *)skb->data;

    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->vdev_id = param->vdev_id;
    cmd->requestor = param->requester;
    cmd->scan_id = param->scan_id;
    cmd->pdev_id = param->pdev_id;

    /* stop the scan with the corresponding scan_id */
    if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
        /* Cancelling all scans */
        cmd->req_type = WMI_SCAN_STOP_ALL;
    } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
        /* Cancelling VAP scans */
        cmd->req_type = WMI_SCN_STOP_VAP_ALL;
    } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
        /* Cancelling specific scan */
        cmd->req_type = WMI_SCAN_STOP_ONE;
    } else {
        ath11k_warn(ar->ab, "invalid scan cancel param %d",
                    param->req_type);
        dev_kfree_skb(skb);
        return -EINVAL;
    }

    ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STOP_SCAN_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
                                       struct scan_chan_list_params *chan_list)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_scan_chan_list_cmd *cmd;
    struct sk_buff *skb;
    struct wmi_channel *chan_info;
    struct channel_param *tchan_info;
    struct wmi_tlv *tlv;
    void *ptr;
    int i, ret, len;
    u32 *reg1, *reg2;

    len = sizeof(*cmd) + TLV_HDR_SIZE +
          sizeof(*chan_info) * chan_list->nallchans;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
               "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
    cmd->pdev_id = chan_list->pdev_id;
    cmd->num_scan_chans = chan_list->nallchans;

    ptr = skb->data + sizeof(*cmd);

    len = sizeof(*chan_info) * chan_list->nallchans;
    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
                  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
    ptr += TLV_HDR_SIZE;

    tchan_info = &chan_list->ch_param[0];

    for (i = 0; i < chan_list->nallchans; ++i) {
        chan_info = ptr;
        memset(chan_info, 0, sizeof(*chan_info));
        len = sizeof(*chan_info);
        chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
                                FIELD_PREP(WMI_TLV_LEN,
                                           len - TLV_HDR_SIZE);

        reg1 = &chan_info->reg_info_1;
        reg2 = &chan_info->reg_info_2;
        chan_info->mhz = tchan_info->mhz;
        chan_info->band_center_freq1 = tchan_info->cfreq1;
        chan_info->band_center_freq2 = tchan_info->cfreq2;

        if (tchan_info->is_chan_passive)
            chan_info->info |= WMI_CHAN_INFO_PASSIVE;
        if (tchan_info->allow_he)
            chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
        else if (tchan_info->allow_vht)
            chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
        else if (tchan_info->allow_ht)
            chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
        if (tchan_info->half_rate)
            chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
        if (tchan_info->quarter_rate)
            chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;

        chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
                                      tchan_info->phy_mode);
        *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
                            tchan_info->minpower);
        *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
                            tchan_info->maxpower);
        *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
                            tchan_info->maxregpower);
        *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
                            tchan_info->reg_class_id);
        *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
                            tchan_info->antennamax);

        ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
                   "WMI chan scan list chan[%d] = %u\n",
                   i, chan_info->mhz);

        ptr += sizeof(*chan_info);
        tchan_info++;
    }

    ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
                                       struct wmi_wmm_params_all_arg *param)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_vdev_set_wmm_params_cmd *cmd;
    struct wmi_wmm_params *wmm_param;
    struct wmi_wmm_params_arg *wmi_wmm_arg;
    struct sk_buff *skb;
    int ret, ac;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
                                 WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->vdev_id = vdev_id;
    cmd->wmm_param_type = 0;

    for (ac = 0; ac < WME_NUM_AC; ac++) {
        switch (ac) {
        case WME_AC_BE:
            wmi_wmm_arg = &param->ac_be;
            break;
        case WME_AC_BK:
            wmi_wmm_arg = &param->ac_bk;
            break;
        case WME_AC_VI:
            wmi_wmm_arg = &param->ac_vi;
            break;
        case WME_AC_VO:
            wmi_wmm_arg = &param->ac_vo;
            break;
        }

        wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
        wmm_param->tlv_header =
            FIELD_PREP(WMI_TLV_TAG,
                       WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
            FIELD_PREP(WMI_TLV_LEN,
                       sizeof(*wmm_param) - TLV_HDR_SIZE);

        wmm_param->aifs = wmi_wmm_arg->aifs;
        wmm_param->cwmin = wmi_wmm_arg->cwmin;
        wmm_param->cwmax = wmi_wmm_arg->cwmax;
        wmm_param->txoplimit = wmi_wmm_arg->txop;
        wmm_param->acm = wmi_wmm_arg->acm;
        wmm_param->no_ack = wmi_wmm_arg->no_ack;

        ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
                   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
                   ac, wmm_param->aifs, wmm_param->cwmin,
                   wmm_param->cwmax, wmm_param->txoplimit,
                   wmm_param->acm, wmm_param->no_ack);
    }

    ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_WMM_PARAMS_CMDID);
    if (ret) {
        ath11k_warn(ar->ab,
                    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
                                                  u32 pdev_id)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_dfs_phyerr_offload_cmd *cmd;
    struct sk_buff *skb;
    int ret;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
    cmd->tlv_header =
        FIELD_PREP(WMI_TLV_TAG,
                   WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
        FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->pdev_id = pdev_id;

    ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
               "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
    if (ret) {
        ath11k_warn(ar->ab,
                    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_pdev_pktlog_filter_cmd *cmd;
    struct wmi_pdev_pktlog_filter_info *info;
    struct sk_buff *skb;
    struct wmi_tlv *tlv;
    void *ptr;
    int ret, len;

    len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;

    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
    cmd->enable = enable;

    ptr = skb->data + sizeof(*cmd);

    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
                  FIELD_PREP(WMI_TLV_LEN, sizeof(*info));

    ptr += TLV_HDR_SIZE;
    info = ptr;

    ether_addr_copy(info->peer_macaddr.addr, addr);
    info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
                       FIELD_PREP(WMI_TLV_LEN,
                                  sizeof(*info) - TLV_HDR_SIZE);

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_PDEV_PKTLOG_FILTER_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
                                     struct wmi_init_country_params init_cc_params)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_init_country_cmd *cmd;
    struct sk_buff *skb;
    int ret;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_init_country_cmd *)skb->data;
    cmd->tlv_header =
        FIELD_PREP(WMI_TLV_TAG,
                   WMI_TAG_SET_INIT_COUNTRY_CMD) |
        FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->pdev_id = ar->pdev->pdev_id;

    switch (init_cc_params.flags) {
    case ALPHA_IS_SET:
        cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
        memcpy((u8 *)&cmd->cc_info.alpha2,
               init_cc_params.cc_info.alpha2, 3);
        break;
    case CC_IS_SET:
        cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
        cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
        break;
    case REGDMN_IS_SET:
        cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
        cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
        break;
    default:
        ret = -EINVAL;
        goto out;
    }

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_SET_INIT_COUNTRY_CMDID);

out:
    if (ret) {
        ath11k_warn(ar->ab,
                    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
                    ret);
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_pktlog_enable_cmd *cmd;
    struct sk_buff *skb;
    int ret;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_pktlog_enable_cmd *)skb->data;

    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
    cmd->evlist = pktlog_filter;
    cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_PDEV_PKTLOG_ENABLE_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}

int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct wmi_pktlog_disable_cmd *cmd;
    struct sk_buff *skb;
    int ret;

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_pktlog_disable_cmd *)skb->data;

    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_PDEV_PKTLOG_DISABLE_CMDID);
    if (ret) {
        ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_DISABLE_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct ath11k_base *ab = wmi->wmi_ab->ab;
    struct wmi_twt_enable_params_cmd *cmd;
    struct sk_buff *skb;
    int ret, len;

    len = sizeof(*cmd);

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
    cmd->pdev_id = pdev_id;
    cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
    cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
    cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
    cmd->congestion_thresh_teardown =
        ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
    cmd->congestion_thresh_critical =
        ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
    cmd->interference_thresh_teardown =
        ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
    cmd->interference_thresh_setup =
        ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
    cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
    cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
    cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
    cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
    cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
    cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
    cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
    cmd->remove_sta_slot_interval =
        ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
    /* TODO add MBSSID support */
    cmd->mbss_support = 0;

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_TWT_ENABLE_CMDID);
    if (ret) {
        ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct ath11k_base *ab = wmi->wmi_ab->ab;
    struct wmi_twt_disable_params_cmd *cmd;
    struct sk_buff *skb;
    int ret, len;

    len = sizeof(*cmd);

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
    cmd->pdev_id = pdev_id;

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_TWT_DISABLE_CMDID);
    if (ret) {
        ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
                                 struct ieee80211_he_obss_pd *he_obss_pd)
{
    struct ath11k_pdev_wmi *wmi = ar->wmi;
    struct ath11k_base *ab = wmi->wmi_ab->ab;
    struct wmi_obss_spatial_reuse_params_cmd *cmd;
    struct sk_buff *skb;
    int ret, len;

    len = sizeof(*cmd);

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
                                 WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
    cmd->vdev_id = vdev_id;
    cmd->enable = he_obss_pd->enable;
    cmd->obss_min = he_obss_pd->min_offset;
    cmd->obss_max = he_obss_pd->max_offset;

    ret = ath11k_wmi_cmd_send(wmi, skb,
                              WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
    if (ret) {
        ath11k_warn(ab,
                    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
        dev_kfree_skb(skb);
    }

    return ret;
}
static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
                              struct wmi_host_pdev_band_to_mac *band_to_mac)
{
    struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
    struct ath11k_pdev *pdev;
    u8 i;

    for (i = 0; i < soc->num_radios; i++) {
        pdev = &soc->pdevs[i];
        hal_reg_cap = &soc->hal_reg_cap[i];
        band_to_mac[i].pdev_id = pdev->pdev_id;

        switch (pdev->cap.supported_bands) {
        case WMI_HOST_WLAN_2G_5G_CAP:
            band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
            band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
            break;
        case WMI_HOST_WLAN_2G_CAP:
            band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
            band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
            break;
        case WMI_HOST_WLAN_5G_CAP:
            band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
            band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
            break;
        default:
            break;
        }
    }
}
static void
ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
                                struct target_resource_config *tg_cfg)
{
    wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
    wmi_cfg->num_peers = tg_cfg->num_peers;
    wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
    wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
    wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
    wmi_cfg->num_tids = tg_cfg->num_tids;
    wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
    wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
    wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
    wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
    wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
    wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
    wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
    wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
    wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
    wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
    wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
    wmi_cfg->roam_offload_max_ap_profiles =
        tg_cfg->roam_offload_max_ap_profiles;
    wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
    wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
    wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
    wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
    wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
    wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
    wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
    wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
        tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
    wmi_cfg->vow_config = tg_cfg->vow_config;
    wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
    wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
    wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
    wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
    wmi_cfg->num_tdls_conn_table_entries =
        tg_cfg->num_tdls_conn_table_entries;
    wmi_cfg->beacon_tx_offload_max_vdev =
        tg_cfg->beacon_tx_offload_max_vdev;
    wmi_cfg->num_multicast_filter_entries =
        tg_cfg->num_multicast_filter_entries;
    wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
    wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
    wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
    wmi_cfg->max_tdls_concurrent_sleep_sta =
        tg_cfg->max_tdls_concurrent_sleep_sta;
    wmi_cfg->max_tdls_concurrent_buffer_sta =
        tg_cfg->max_tdls_concurrent_buffer_sta;
    wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
    wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
    wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
    wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
    wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
    wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
    wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
    wmi_cfg->flag1 = tg_cfg->atf_config;
    wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
    wmi_cfg->sched_params = tg_cfg->sched_params;
    wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
    wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
}
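/*
 * Note: the WMI_INIT command below is assembled as: wmi_init_cmd, a
 * resource-config TLV, an array of host memory chunk TLVs, and
 * (optionally, when a preferred hw mode was chosen) a pdev_set_hw_mode
 * TLV followed by its band-to-mac array.
 */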
static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
                                struct wmi_init_cmd_param *param)
{
    struct ath11k_base *ab = wmi->wmi_ab->ab;
    struct sk_buff *skb;
    struct wmi_init_cmd *cmd;
    struct wmi_resource_config *cfg;
    struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
    struct wmi_pdev_band_to_mac *band_to_mac;
    struct wlan_host_mem_chunk *host_mem_chunks;
    struct wmi_tlv *tlv;
    void *ptr;
    int idx, ret;
    u32 len;
    u32 hw_mode_len = 0;

    if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
        hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
                      (param->num_band_to_mac * sizeof(*band_to_mac));

    len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
          (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);

    skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
    if (!skb)
        return -ENOMEM;

    cmd = (struct wmi_init_cmd *)skb->data;

    cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

    ptr = skb->data + sizeof(*cmd);
    cfg = ptr;

    ath11k_wmi_copy_resource_config(cfg, param->res_cfg);

    cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
                      FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);

    ptr += sizeof(*cfg);
    host_mem_chunks = ptr + TLV_HDR_SIZE;
    len = sizeof(struct wlan_host_mem_chunk);

    for (idx = 0; idx < param->num_mem_chunks; ++idx) {
        host_mem_chunks[idx].tlv_header =
            FIELD_PREP(WMI_TLV_TAG,
                       WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
            FIELD_PREP(WMI_TLV_LEN, len);

        host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
        host_mem_chunks[idx].size = param->mem_chunks[idx].len;
        host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;

        ath11k_dbg(ab, ATH11K_DBG_WMI,
                   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
                   param->mem_chunks[idx].req_id,
                   (u64)param->mem_chunks[idx].paddr,
                   param->mem_chunks[idx].len);
    }
    cmd->num_host_mem_chunks = param->num_mem_chunks;
    len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;

    /* num_mem_chunks is zero */
    tlv = ptr;
    tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
                  FIELD_PREP(WMI_TLV_LEN, len);
    ptr += TLV_HDR_SIZE + len;

    if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
        hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
        hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
                                         WMI_TAG_PDEV_SET_HW_MODE_CMD) |
                              FIELD_PREP(WMI_TLV_LEN,
                                         sizeof(*hw_mode) - TLV_HDR_SIZE);

        hw_mode->hw_mode_index = param->hw_mode_id;
        hw_mode->num_band_to_mac = param->num_band_to_mac;

        ptr += sizeof(*hw_mode);

        len = param->num_band_to_mac * sizeof(*band_to_mac);
        tlv = ptr;
        tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
                      FIELD_PREP(WMI_TLV_LEN, len);

        ptr += TLV_HDR_SIZE;
        len = sizeof(*band_to_mac);

        for (idx = 0; idx < param->num_band_to_mac; idx++) {
            band_to_mac = (void *)ptr;

            band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
                                                 WMI_TAG_PDEV_BAND_TO_MAC) |
                                      FIELD_PREP(WMI_TLV_LEN,
                                                 len - TLV_HDR_SIZE);
            band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
            band_to_mac->start_freq =
                param->band_to_mac[idx].start_freq;
            band_to_mac->end_freq =
                param->band_to_mac[idx].end_freq;
            ptr += sizeof(*band_to_mac);
        }
    }

    ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
    if (ret) {
        ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
        dev_kfree_skb(skb);
    }

    return ret;
}
int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
    unsigned long time_left;

    time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
                                            WMI_SERVICE_READY_TIMEOUT_HZ);
    if (!time_left)
        return -ETIMEDOUT;

    return 0;
}

int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
{
    unsigned long time_left;

    time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
                                            WMI_SERVICE_READY_TIMEOUT_HZ);
    if (!time_left)
        return -ETIMEDOUT;

    return 0;
}
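/*
 * Note: both waits above share WMI_SERVICE_READY_TIMEOUT_HZ;
 * service_ready is completed while parsing the extended service-ready
 * event, and unified_ready once the target reports WMI ready after
 * WMI_INIT has been sent.
 */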
int ath11k_wmi_cmd_init(struct ath11k_base *ab)
{
    struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
    struct wmi_init_cmd_param init_param;
    struct target_resource_config config;

    memset(&init_param, 0, sizeof(init_param));
    memset(&config, 0, sizeof(config));

    config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;

    if (ab->num_radios == 2) {
        config.num_peers = TARGET_NUM_PEERS(DBS);
        config.num_tids = TARGET_NUM_TIDS(DBS);
    } else if (ab->num_radios == 3) {
        config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
        config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
    } else {
        /* Control should not reach here */
        config.num_peers = TARGET_NUM_PEERS(SINGLE);
        config.num_tids = TARGET_NUM_TIDS(SINGLE);
    }
    config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
    config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
    config.num_peer_keys = TARGET_NUM_PEER_KEYS;
    config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
    config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
    config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
    config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
    config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
    config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
    config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
    config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
    config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
    config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
    config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
    config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
    config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
    config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
    config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
    config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
    config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
    config.dma_burst_size = TARGET_DMA_BURST_SIZE;
    config.rx_skip_defrag_timeout_dup_detection_check =
        TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
    config.vow_config = TARGET_VOW_CONFIG;
    config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
    config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
    config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
    config.rx_batchmode = TARGET_RX_BATCHMODE;
    config.peer_map_unmap_v2_support = 1;
    config.twt_ap_pdev_count = 2;
    config.twt_ap_sta_count = 1000;

    memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));

    init_param.res_cfg = &wmi_sc->wlan_resource_config;
    init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
    init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
    init_param.mem_chunks = wmi_sc->mem_chunks;

    if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
        init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;

    init_param.num_band_to_mac = ab->num_radios;

    ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);

    return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}
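/*
 * Note: the parsers below walk the extended service-ready event. Each
 * WMI_TAG_ARRAY_STRUCT TLV in that event is positional: hw mode caps
 * first, then mac/phy caps, then extended HAL reg caps, tracked via the
 * *_done flags in struct wmi_tlv_svc_rdy_ext_parse.
 */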
static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
                                             u16 tag, u16 len,
                                             const void *ptr, void *data)
{
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
    struct wmi_hw_mode_capabilities *hw_mode_cap;
    u32 phy_map = 0;

    if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
        return -EPROTO;

    if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
        return -ENOBUFS;

    hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
                               hw_mode_id);
    svc_rdy_ext->n_hw_mode_caps++;

    phy_map = hw_mode_cap->phy_id_map;
    while (phy_map) {
        svc_rdy_ext->tot_phy_id++;
        phy_map = phy_map >> 1;
    }

    return 0;
}
static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
                                       u16 len, const void *ptr, void *data)
{
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
    struct wmi_hw_mode_capabilities *hw_mode_caps;
    enum wmi_host_hw_mode_config_type mode, pref;
    u32 i;
    int ret;

    svc_rdy_ext->n_hw_mode_caps = 0;
    svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;

    ret = ath11k_wmi_tlv_iter(soc, ptr, len,
                              ath11k_wmi_tlv_hw_mode_caps_parse,
                              svc_rdy_ext);
    if (ret) {
        ath11k_warn(soc, "failed to parse tlv %d\n", ret);
        return ret;
    }

    i = 0;
    while (i < svc_rdy_ext->n_hw_mode_caps) {
        hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
        mode = hw_mode_caps->hw_mode_id;
        pref = soc->wmi_ab.preferred_hw_mode;

        if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
            svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
            soc->wmi_ab.preferred_hw_mode = mode;
        }
        i++;
    }

    if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
        return -EINVAL;

    return 0;
}
static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
                                             u16 tag, u16 len,
                                             const void *ptr, void *data)
{
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

    if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
        return -EPROTO;

    if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
        return -ENOBUFS;

    len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
    if (!svc_rdy_ext->n_mac_phy_caps) {
        svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
                                            GFP_ATOMIC);
        if (!svc_rdy_ext->mac_phy_caps)
            return -ENOMEM;
    }

    memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
    svc_rdy_ext->n_mac_phy_caps++;
    return 0;
}
static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
                                                 u16 tag, u16 len,
                                                 const void *ptr, void *data)
{
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

    if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
        return -EPROTO;

    if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
        return -ENOBUFS;

    svc_rdy_ext->n_ext_hal_reg_caps++;
    return 0;
}
static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
                                           u16 len, const void *ptr, void *data)
{
    struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
    struct ath11k_hal_reg_capabilities_ext reg_cap;
    int ret;
    u32 i;

    svc_rdy_ext->n_ext_hal_reg_caps = 0;
    svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
    ret = ath11k_wmi_tlv_iter(soc, ptr, len,
                              ath11k_wmi_tlv_ext_hal_reg_caps_parse,
                              svc_rdy_ext);
    if (ret) {
        ath11k_warn(soc, "failed to parse tlv %d\n", ret);
        return ret;
    }

    for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
        ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
                                              svc_rdy_ext->soc_hal_reg_caps,
                                              svc_rdy_ext->ext_hal_reg_caps, i,
                                              &reg_cap);
        if (ret) {
            ath11k_warn(soc, "failed to extract reg cap %d\n", i);
            return ret;
        }

        memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
               &reg_cap, sizeof(reg_cap));
    }
    return 0;
}
static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
                                                     u16 len, const void *ptr,
                                                     void *data)
{
    struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
    u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
    u32 phy_id_map;
    int ret;

    svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
    svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

    soc->num_radios = 0;
    phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

    while (phy_id_map && soc->num_radios < MAX_RADIOS) {
        ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
                                                    svc_rdy_ext->hw_caps,
                                                    svc_rdy_ext->hw_mode_caps,
                                                    svc_rdy_ext->soc_hal_reg_caps,
                                                    svc_rdy_ext->mac_phy_caps,
                                                    hw_mode_id, soc->num_radios,
                                                    &soc->pdevs[soc->num_radios]);
        if (ret) {
            ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
                        soc->num_radios);
            return ret;
        }

        soc->num_radios++;

        /* TODO: mac_phy_cap prints */
        phy_id_map >>= 1;
    }
    return 0;
}
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
                                            u16 tag, u16 len,
                                            const void *ptr, void *data)
{
    struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
    struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
    int ret;

    switch (tag) {
    case WMI_TAG_SERVICE_READY_EXT_EVENT:
        ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
                                        &svc_rdy_ext->param);
        if (ret) {
            ath11k_warn(ab, "unable to extract ext params\n");
            return ret;
        }
        break;

    case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
        svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
        svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
        break;

    case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
        ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
                                                        svc_rdy_ext);
        if (ret)
            return ret;
        break;

    case WMI_TAG_ARRAY_STRUCT:
        if (!svc_rdy_ext->hw_mode_done) {
            ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
                                              svc_rdy_ext);
            if (ret)
                return ret;

            svc_rdy_ext->hw_mode_done = true;
        } else if (!svc_rdy_ext->mac_phy_done) {
            svc_rdy_ext->n_mac_phy_caps = 0;
            ret = ath11k_wmi_tlv_iter(ab, ptr, len,
                                      ath11k_wmi_tlv_mac_phy_caps_parse,
                                      svc_rdy_ext);
            if (ret) {
                ath11k_warn(ab, "failed to parse tlv %d\n", ret);
                return ret;
            }

            svc_rdy_ext->mac_phy_done = true;
        } else if (!svc_rdy_ext->ext_hal_reg_done) {
            ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
                                                  svc_rdy_ext);
            if (ret)
                return ret;

            svc_rdy_ext->ext_hal_reg_done = true;
            complete(&ab->wmi_ab.service_ready);
        }
        break;

    default:
        break;
    }
    return 0;
}
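/*
 * Note: mac_phy_caps is allocated inside the mac/phy parser above and
 * owned by the on-stack parse state, so the event handler below frees it
 * once the whole event has been walked.
 */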
static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
                                          struct sk_buff *skb)
{
    struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
    int ret;

    ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
                              ath11k_wmi_tlv_svc_rdy_ext_parse,
                              &svc_rdy_ext);
    if (ret) {
        ath11k_warn(ab, "failed to parse tlv %d\n", ret);
        return ret;
    }

    kfree(svc_rdy_ext.mac_phy_caps);
    return 0;
}
static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
                                           struct wmi_vdev_start_resp_event *vdev_rsp)
{
    const void **tb;
    const struct wmi_vdev_start_resp_event *ev;
    int ret;

    tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
    if (IS_ERR(tb)) {
        ret = PTR_ERR(tb);
        ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
        return ret;
    }

    ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
    if (!ev) {
        ath11k_warn(ab, "failed to fetch vdev start resp ev");
        kfree(tb);
        return -EPROTO;
    }

    memset(vdev_rsp, 0, sizeof(*vdev_rsp));

    vdev_rsp->vdev_id = ev->vdev_id;
    vdev_rsp->requestor_id = ev->requestor_id;
    vdev_rsp->resp_type = ev->resp_type;
    vdev_rsp->status = ev->status;
    vdev_rsp->chain_mask = ev->chain_mask;
    vdev_rsp->smps_mode = ev->smps_mode;
    vdev_rsp->mac_id = ev->mac_id;
    vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
    vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;

    kfree(tb);
    return 0;
}
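/*
 * Note: each WMI regulatory rule packs start/end frequency, bandwidth,
 * power and flags into bitfields; create_reg_rules_from_wmi() below
 * unpacks them with FIELD_GET() into host cur_reg_rule entries.
 */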
static struct cur_reg_rule
*create_reg_rules_from_wmi(u32 num_reg_rules,
                           struct wmi_regulatory_rule_struct *wmi_reg_rule)
{
    struct cur_reg_rule *reg_rule_ptr;
    u32 count;

    reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
                           GFP_ATOMIC);
    if (!reg_rule_ptr)
        return NULL;

    for (count = 0; count < num_reg_rules; count++) {
        reg_rule_ptr[count].start_freq =
            FIELD_GET(REG_RULE_START_FREQ,
                      wmi_reg_rule[count].freq_info);
        reg_rule_ptr[count].end_freq =
            FIELD_GET(REG_RULE_END_FREQ,
                      wmi_reg_rule[count].freq_info);
        reg_rule_ptr[count].max_bw =
            FIELD_GET(REG_RULE_MAX_BW,
                      wmi_reg_rule[count].bw_pwr_info);
        reg_rule_ptr[count].reg_power =
            FIELD_GET(REG_RULE_REG_PWR,
                      wmi_reg_rule[count].bw_pwr_info);
        reg_rule_ptr[count].ant_gain =
            FIELD_GET(REG_RULE_ANT_GAIN,
                      wmi_reg_rule[count].bw_pwr_info);
        reg_rule_ptr[count].flags =
            FIELD_GET(REG_RULE_FLAGS,
                      wmi_reg_rule[count].flag_info);
    }

    return reg_rule_ptr;
}
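/*
 * Note: the REG_CHAN_LIST_CC event carries a fixed header followed by a
 * TLV array holding the 2 GHz rules first and the 5 GHz rules after
 * them, which is why wmi_reg_rule is advanced by num_2g_reg_rules before
 * the 5 GHz rules are converted.
 */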
static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
                                               struct sk_buff *skb,
                                               struct cur_regulatory_info *reg_info)
{
    const void **tb;
    const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
    struct wmi_regulatory_rule_struct *wmi_reg_rule;
    u32 num_2g_reg_rules, num_5g_reg_rules;
    int ret;

    ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");

    tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
    if (IS_ERR(tb)) {
        ret = PTR_ERR(tb);
        ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
        return ret;
    }

    chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
    if (!chan_list_event_hdr) {
        ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
        kfree(tb);
        return -EPROTO;
    }

    reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
    reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;

    if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
        ath11k_warn(ab, "No regulatory rules available in the event info\n");
        kfree(tb);
        return -EINVAL;
    }

    memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
           REG_ALPHA2_LEN);
    reg_info->dfs_region = chan_list_event_hdr->dfs_region;
    reg_info->phybitmap = chan_list_event_hdr->phybitmap;
    reg_info->num_phy = chan_list_event_hdr->num_phy;
    reg_info->phy_id = chan_list_event_hdr->phy_id;
    reg_info->ctry_code = chan_list_event_hdr->country_id;
    reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
    if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
        reg_info->status_code = REG_SET_CC_STATUS_PASS;
    else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
        reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
    else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
        reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
    else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
        reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
    else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
        reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
    else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
        reg_info->status_code = REG_SET_CC_STATUS_FAIL;

    reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
    reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
    reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
    reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;

    num_2g_reg_rules = reg_info->num_2g_reg_rules;
    num_5g_reg_rules = reg_info->num_5g_reg_rules;

    ath11k_dbg(ab, ATH11K_DBG_WMI,
               "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
               __func__, reg_info->alpha2, reg_info->dfs_region,
               reg_info->min_bw_2g, reg_info->max_bw_2g,
               reg_info->min_bw_5g, reg_info->max_bw_5g);

    ath11k_dbg(ab, ATH11K_DBG_WMI,
               "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
               num_2g_reg_rules, num_5g_reg_rules);

    wmi_reg_rule =
        (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
                                              + sizeof(*chan_list_event_hdr)
                                              + sizeof(struct wmi_tlv));

    if (num_2g_reg_rules) {
        reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
                                                               wmi_reg_rule);
        if (!reg_info->reg_rules_2g_ptr) {
            kfree(tb);
            ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
            return -ENOMEM;
        }
    }

    if (num_5g_reg_rules) {
        wmi_reg_rule += num_2g_reg_rules;
        reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
                                                               wmi_reg_rule);
        if (!reg_info->reg_rules_5g_ptr) {
            kfree(tb);
            ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
            return -ENOMEM;
        }
    }

    ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");

    kfree(tb);
    return 0;
}
3367 static int ath11k_pull_peer_del_resp_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3368 struct wmi_peer_delete_resp_event
*peer_del_resp
)
3371 const struct wmi_peer_delete_resp_event
*ev
;
3374 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3377 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3381 ev
= tb
[WMI_TAG_PEER_DELETE_RESP_EVENT
];
3383 ath11k_warn(ab
, "failed to fetch peer delete resp ev");
3388 memset(peer_del_resp
, 0, sizeof(*peer_del_resp
));
3390 peer_del_resp
->vdev_id
= ev
->vdev_id
;
3391 ether_addr_copy(peer_del_resp
->peer_macaddr
.addr
,
3392 ev
->peer_macaddr
.addr
);
3398 static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base
*ab
, void *evt_buf
,
3399 u32 len
, u32
*vdev_id
,
3403 const struct wmi_bcn_tx_status_event
*ev
;
3406 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
3409 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3413 ev
= tb
[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT
];
3415 ath11k_warn(ab
, "failed to fetch bcn tx status ev");
3420 *vdev_id
= ev
->vdev_id
;
3421 *tx_status
= ev
->tx_status
;
3427 static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3431 const struct wmi_vdev_stopped_event
*ev
;
3434 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3437 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3441 ev
= tb
[WMI_TAG_VDEV_STOPPED_EVENT
];
3443 ath11k_warn(ab
, "failed to fetch vdev stop ev");
3448 *vdev_id
= ev
->vdev_id
;
3454 static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base
*ab
,
3455 struct sk_buff
*skb
,
3456 struct mgmt_rx_event_params
*hdr
)
3459 const struct wmi_mgmt_rx_hdr
*ev
;
3463 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3466 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3470 ev
= tb
[WMI_TAG_MGMT_RX_HDR
];
3471 frame
= tb
[WMI_TAG_ARRAY_BYTE
];
3473 if (!ev
|| !frame
) {
3474 ath11k_warn(ab
, "failed to fetch mgmt rx hdr");
3479 hdr
->pdev_id
= ev
->pdev_id
;
3480 hdr
->channel
= ev
->channel
;
3482 hdr
->rate
= ev
->rate
;
3483 hdr
->phy_mode
= ev
->phy_mode
;
3484 hdr
->buf_len
= ev
->buf_len
;
3485 hdr
->status
= ev
->status
;
3486 hdr
->flags
= ev
->flags
;
3487 hdr
->rssi
= ev
->rssi
;
3488 hdr
->tsf_delta
= ev
->tsf_delta
;
3489 memcpy(hdr
->rssi_ctl
, ev
->rssi_ctl
, sizeof(hdr
->rssi_ctl
));
3491 if (skb
->len
< (frame
- skb
->data
) + hdr
->buf_len
) {
3492 ath11k_warn(ab
, "invalid length in mgmt rx hdr ev");
3497 /* shift the sk_buff to point to `frame` */
3499 skb_put(skb
, frame
- skb
->data
);
3500 skb_pull(skb
, frame
- skb
->data
);
3501 skb_put(skb
, hdr
->buf_len
);
3503 ath11k_ce_byte_swap(skb
->data
, hdr
->buf_len
);
3509 static int wmi_process_mgmt_tx_comp(struct ath11k
*ar
, u32 desc_id
,
3512 struct sk_buff
*msdu
;
3513 struct ieee80211_tx_info
*info
;
3514 struct ath11k_skb_cb
*skb_cb
;
3516 spin_lock_bh(&ar
->txmgmt_idr_lock
);
3517 msdu
= idr_find(&ar
->txmgmt_idr
, desc_id
);
3520 ath11k_warn(ar
->ab
, "received mgmt tx compl for invalid msdu_id: %d\n",
3522 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
3526 idr_remove(&ar
->txmgmt_idr
, desc_id
);
3527 spin_unlock_bh(&ar
->txmgmt_idr_lock
);
3529 skb_cb
= ATH11K_SKB_CB(msdu
);
3530 dma_unmap_single(ar
->ab
->dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
3532 info
= IEEE80211_SKB_CB(msdu
);
3533 if ((!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
)) && !status
)
3534 info
->flags
|= IEEE80211_TX_STAT_ACK
;
3536 ieee80211_tx_status_irqsafe(ar
->hw
, msdu
);
3538 WARN_ON_ONCE(atomic_read(&ar
->num_pending_mgmt_tx
) == 0);
3539 atomic_dec(&ar
->num_pending_mgmt_tx
);
3544 static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base
*ab
,
3545 struct sk_buff
*skb
,
3546 struct wmi_mgmt_tx_compl_event
*param
)
3549 const struct wmi_mgmt_tx_compl_event
*ev
;
3552 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3555 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3559 ev
= tb
[WMI_TAG_MGMT_TX_COMPL_EVENT
];
3561 ath11k_warn(ab
, "failed to fetch mgmt tx compl ev");
3566 param
->pdev_id
= ev
->pdev_id
;
3567 param
->desc_id
= ev
->desc_id
;
3568 param
->status
= ev
->status
;
3574 static void ath11k_wmi_event_scan_started(struct ath11k
*ar
)
3576 lockdep_assert_held(&ar
->data_lock
);
3578 switch (ar
->scan
.state
) {
3579 case ATH11K_SCAN_IDLE
:
3580 case ATH11K_SCAN_RUNNING
:
3581 case ATH11K_SCAN_ABORTING
:
3582 ath11k_warn(ar
->ab
, "received scan started event in an invalid scan state: %s (%d)\n",
3583 ath11k_scan_state_str(ar
->scan
.state
),
3586 case ATH11K_SCAN_STARTING
:
3587 ar
->scan
.state
= ATH11K_SCAN_RUNNING
;
3588 complete(&ar
->scan
.started
);
3593 static void ath11k_wmi_event_scan_start_failed(struct ath11k
*ar
)
3595 lockdep_assert_held(&ar
->data_lock
);
3597 switch (ar
->scan
.state
) {
3598 case ATH11K_SCAN_IDLE
:
3599 case ATH11K_SCAN_RUNNING
:
3600 case ATH11K_SCAN_ABORTING
:
3601 ath11k_warn(ar
->ab
, "received scan start failed event in an invalid scan state: %s (%d)\n",
3602 ath11k_scan_state_str(ar
->scan
.state
),
3605 case ATH11K_SCAN_STARTING
:
3606 complete(&ar
->scan
.started
);
3607 __ath11k_mac_scan_finish(ar
);
3612 static void ath11k_wmi_event_scan_completed(struct ath11k
*ar
)
3614 lockdep_assert_held(&ar
->data_lock
);
3616 switch (ar
->scan
.state
) {
3617 case ATH11K_SCAN_IDLE
:
3618 case ATH11K_SCAN_STARTING
:
3619 /* One suspected reason scan can be completed while starting is
3620 * if firmware fails to deliver all scan events to the host,
3621 * e.g. when transport pipe is full. This has been observed
3622 * with spectral scan phyerr events starving wmi transport
3623 * pipe. In such case the "scan completed" event should be (and
3624 * is) ignored by the host as it may be just firmware's scan
3625 * state machine recovering.
3627 ath11k_warn(ar
->ab
, "received scan completed event in an invalid scan state: %s (%d)\n",
3628 ath11k_scan_state_str(ar
->scan
.state
),
3631 case ATH11K_SCAN_RUNNING
:
3632 case ATH11K_SCAN_ABORTING
:
3633 __ath11k_mac_scan_finish(ar
);
3638 static void ath11k_wmi_event_scan_bss_chan(struct ath11k
*ar
)
3640 lockdep_assert_held(&ar
->data_lock
);
3642 switch (ar
->scan
.state
) {
3643 case ATH11K_SCAN_IDLE
:
3644 case ATH11K_SCAN_STARTING
:
3645 ath11k_warn(ar
->ab
, "received scan bss chan event in an invalid scan state: %s (%d)\n",
3646 ath11k_scan_state_str(ar
->scan
.state
),
3649 case ATH11K_SCAN_RUNNING
:
3650 case ATH11K_SCAN_ABORTING
:
3651 ar
->scan_channel
= NULL
;
3656 static void ath11k_wmi_event_scan_foreign_chan(struct ath11k
*ar
, u32 freq
)
3658 lockdep_assert_held(&ar
->data_lock
);
3660 switch (ar
->scan
.state
) {
3661 case ATH11K_SCAN_IDLE
:
3662 case ATH11K_SCAN_STARTING
:
3663 ath11k_warn(ar
->ab
, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
3664 ath11k_scan_state_str(ar
->scan
.state
),
3667 case ATH11K_SCAN_RUNNING
:
3668 case ATH11K_SCAN_ABORTING
:
3669 ar
->scan_channel
= ieee80211_get_channel(ar
->hw
->wiphy
, freq
);
3675 ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type
,
3676 enum wmi_scan_completion_reason reason
)
3679 case WMI_SCAN_EVENT_STARTED
:
3681 case WMI_SCAN_EVENT_COMPLETED
:
3683 case WMI_SCAN_REASON_COMPLETED
:
3685 case WMI_SCAN_REASON_CANCELLED
:
3686 return "completed [cancelled]";
3687 case WMI_SCAN_REASON_PREEMPTED
:
3688 return "completed [preempted]";
3689 case WMI_SCAN_REASON_TIMEDOUT
:
3690 return "completed [timedout]";
3691 case WMI_SCAN_REASON_INTERNAL_FAILURE
:
3692 return "completed [internal err]";
3693 case WMI_SCAN_REASON_MAX
:
3696 return "completed [unknown]";
3697 case WMI_SCAN_EVENT_BSS_CHANNEL
:
3698 return "bss channel";
3699 case WMI_SCAN_EVENT_FOREIGN_CHAN
:
3700 return "foreign channel";
3701 case WMI_SCAN_EVENT_DEQUEUED
:
3703 case WMI_SCAN_EVENT_PREEMPTED
:
3705 case WMI_SCAN_EVENT_START_FAILED
:
3706 return "start failed";
3707 case WMI_SCAN_EVENT_RESTARTED
:
3709 case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT
:
3710 return "foreign channel exit";
3716 static int ath11k_pull_scan_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3717 struct wmi_scan_event
*scan_evt_param
)
3720 const struct wmi_scan_event
*ev
;
3723 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3726 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3730 ev
= tb
[WMI_TAG_SCAN_EVENT
];
3732 ath11k_warn(ab
, "failed to fetch scan ev");
3737 scan_evt_param
->event_type
= ev
->event_type
;
3738 scan_evt_param
->reason
= ev
->reason
;
3739 scan_evt_param
->channel_freq
= ev
->channel_freq
;
3740 scan_evt_param
->scan_req_id
= ev
->scan_req_id
;
3741 scan_evt_param
->scan_id
= ev
->scan_id
;
3742 scan_evt_param
->vdev_id
= ev
->vdev_id
;
3743 scan_evt_param
->tsf_timestamp
= ev
->tsf_timestamp
;
3749 static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3750 struct wmi_peer_sta_kickout_arg
*arg
)
3753 const struct wmi_peer_sta_kickout_event
*ev
;
3756 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3759 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3763 ev
= tb
[WMI_TAG_PEER_STA_KICKOUT_EVENT
];
3765 ath11k_warn(ab
, "failed to fetch peer sta kickout ev");
3770 arg
->mac_addr
= ev
->peer_macaddr
.addr
;
3776 static int ath11k_pull_roam_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3777 struct wmi_roam_event
*roam_ev
)
3780 const struct wmi_roam_event
*ev
;
3783 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3786 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3790 ev
= tb
[WMI_TAG_ROAM_EVENT
];
3792 ath11k_warn(ab
, "failed to fetch roam ev");
3797 roam_ev
->vdev_id
= ev
->vdev_id
;
3798 roam_ev
->reason
= ev
->reason
;
3799 roam_ev
->rssi
= ev
->rssi
;
3805 static int freq_to_idx(struct ath11k
*ar
, int freq
)
3807 struct ieee80211_supported_band
*sband
;
3808 int band
, ch
, idx
= 0;
3810 for (band
= NL80211_BAND_2GHZ
; band
< NUM_NL80211_BANDS
; band
++) {
3811 sband
= ar
->hw
->wiphy
->bands
[band
];
3815 for (ch
= 0; ch
< sband
->n_channels
; ch
++, idx
++)
3816 if (sband
->channels
[ch
].center_freq
== freq
)
3824 static int ath11k_pull_chan_info_ev(struct ath11k_base
*ab
, u8
*evt_buf
,
3825 u32 len
, struct wmi_chan_info_event
*ch_info_ev
)
3828 const struct wmi_chan_info_event
*ev
;
3831 tb
= ath11k_wmi_tlv_parse_alloc(ab
, evt_buf
, len
, GFP_ATOMIC
);
3834 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3838 ev
= tb
[WMI_TAG_CHAN_INFO_EVENT
];
3840 ath11k_warn(ab
, "failed to fetch chan info ev");
3845 ch_info_ev
->err_code
= ev
->err_code
;
3846 ch_info_ev
->freq
= ev
->freq
;
3847 ch_info_ev
->cmd_flags
= ev
->cmd_flags
;
3848 ch_info_ev
->noise_floor
= ev
->noise_floor
;
3849 ch_info_ev
->rx_clear_count
= ev
->rx_clear_count
;
3850 ch_info_ev
->cycle_count
= ev
->cycle_count
;
3851 ch_info_ev
->chan_tx_pwr_range
= ev
->chan_tx_pwr_range
;
3852 ch_info_ev
->chan_tx_pwr_tp
= ev
->chan_tx_pwr_tp
;
3853 ch_info_ev
->rx_frame_count
= ev
->rx_frame_count
;
3854 ch_info_ev
->tx_frame_cnt
= ev
->tx_frame_cnt
;
3855 ch_info_ev
->mac_clk_mhz
= ev
->mac_clk_mhz
;
3856 ch_info_ev
->vdev_id
= ev
->vdev_id
;
3863 ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3864 struct wmi_pdev_bss_chan_info_event
*bss_ch_info_ev
)
3867 const struct wmi_pdev_bss_chan_info_event
*ev
;
3870 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3873 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3877 ev
= tb
[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT
];
3879 ath11k_warn(ab
, "failed to fetch pdev bss chan info ev");
3884 bss_ch_info_ev
->pdev_id
= ev
->pdev_id
;
3885 bss_ch_info_ev
->freq
= ev
->freq
;
3886 bss_ch_info_ev
->noise_floor
= ev
->noise_floor
;
3887 bss_ch_info_ev
->rx_clear_count_low
= ev
->rx_clear_count_low
;
3888 bss_ch_info_ev
->rx_clear_count_high
= ev
->rx_clear_count_high
;
3889 bss_ch_info_ev
->cycle_count_low
= ev
->cycle_count_low
;
3890 bss_ch_info_ev
->cycle_count_high
= ev
->cycle_count_high
;
3891 bss_ch_info_ev
->tx_cycle_count_low
= ev
->tx_cycle_count_low
;
3892 bss_ch_info_ev
->tx_cycle_count_high
= ev
->tx_cycle_count_high
;
3893 bss_ch_info_ev
->rx_cycle_count_low
= ev
->rx_cycle_count_low
;
3894 bss_ch_info_ev
->rx_cycle_count_high
= ev
->rx_cycle_count_high
;
3895 bss_ch_info_ev
->rx_bss_cycle_count_low
= ev
->rx_bss_cycle_count_low
;
3896 bss_ch_info_ev
->rx_bss_cycle_count_high
= ev
->rx_bss_cycle_count_high
;
3903 ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3904 struct wmi_vdev_install_key_complete_arg
*arg
)
3907 const struct wmi_vdev_install_key_compl_event
*ev
;
3910 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3913 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3917 ev
= tb
[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT
];
3919 ath11k_warn(ab
, "failed to fetch vdev install key compl ev");
3924 arg
->vdev_id
= ev
->vdev_id
;
3925 arg
->macaddr
= ev
->peer_macaddr
.addr
;
3926 arg
->key_idx
= ev
->key_idx
;
3927 arg
->key_flags
= ev
->key_flags
;
3928 arg
->status
= ev
->status
;
3934 static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base
*ab
, struct sk_buff
*skb
,
3935 struct wmi_peer_assoc_conf_arg
*peer_assoc_conf
)
3938 const struct wmi_peer_assoc_conf_event
*ev
;
3941 tb
= ath11k_wmi_tlv_parse_alloc(ab
, skb
->data
, skb
->len
, GFP_ATOMIC
);
3944 ath11k_warn(ab
, "failed to parse tlv: %d\n", ret
);
3948 ev
= tb
[WMI_TAG_PEER_ASSOC_CONF_EVENT
];
3950 ath11k_warn(ab
, "failed to fetch peer assoc conf ev");
3955 peer_assoc_conf
->vdev_id
= ev
->vdev_id
;
3956 peer_assoc_conf
->macaddr
= ev
->peer_macaddr
.addr
;
3962 static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base
*src
,
3963 struct ath11k_fw_stats_pdev
*dst
)
3965 dst
->ch_noise_floor
= src
->chan_nf
;
3966 dst
->tx_frame_count
= src
->tx_frame_count
;
3967 dst
->rx_frame_count
= src
->rx_frame_count
;
3968 dst
->rx_clear_count
= src
->rx_clear_count
;
3969 dst
->cycle_count
= src
->cycle_count
;
3970 dst
->phy_err_count
= src
->phy_err_count
;
3971 dst
->chan_tx_power
= src
->chan_tx_pwr
;
3975 ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx
*src
,
3976 struct ath11k_fw_stats_pdev
*dst
)
3978 dst
->comp_queued
= src
->comp_queued
;
3979 dst
->comp_delivered
= src
->comp_delivered
;
3980 dst
->msdu_enqued
= src
->msdu_enqued
;
3981 dst
->mpdu_enqued
= src
->mpdu_enqued
;
3982 dst
->wmm_drop
= src
->wmm_drop
;
3983 dst
->local_enqued
= src
->local_enqued
;
3984 dst
->local_freed
= src
->local_freed
;
3985 dst
->hw_queued
= src
->hw_queued
;
3986 dst
->hw_reaped
= src
->hw_reaped
;
3987 dst
->underrun
= src
->underrun
;
3988 dst
->tx_abort
= src
->tx_abort
;
3989 dst
->mpdus_requed
= src
->mpdus_requed
;
3990 dst
->tx_ko
= src
->tx_ko
;
3991 dst
->data_rc
= src
->data_rc
;
3992 dst
->self_triggers
= src
->self_triggers
;
3993 dst
->sw_retry_failure
= src
->sw_retry_failure
;
3994 dst
->illgl_rate_phy_err
= src
->illgl_rate_phy_err
;
3995 dst
->pdev_cont_xretry
= src
->pdev_cont_xretry
;
3996 dst
->pdev_tx_timeout
= src
->pdev_tx_timeout
;
3997 dst
->pdev_resets
= src
->pdev_resets
;
3998 dst
->stateless_tid_alloc_failure
= src
->stateless_tid_alloc_failure
;
3999 dst
->phy_underrun
= src
->phy_underrun
;
4000 dst
->txop_ovf
= src
->txop_ovf
;
4003 static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx
*src
,
4004 struct ath11k_fw_stats_pdev
*dst
)
4006 dst
->mid_ppdu_route_change
= src
->mid_ppdu_route_change
;
4007 dst
->status_rcvd
= src
->status_rcvd
;
4008 dst
->r0_frags
= src
->r0_frags
;
4009 dst
->r1_frags
= src
->r1_frags
;
4010 dst
->r2_frags
= src
->r2_frags
;
4011 dst
->r3_frags
= src
->r3_frags
;
4012 dst
->htt_msdus
= src
->htt_msdus
;
4013 dst
->htt_mpdus
= src
->htt_mpdus
;
4014 dst
->loc_msdus
= src
->loc_msdus
;
4015 dst
->loc_mpdus
= src
->loc_mpdus
;
4016 dst
->oversize_amsdu
= src
->oversize_amsdu
;
4017 dst
->phy_errs
= src
->phy_errs
;
4018 dst
->phy_err_drop
= src
->phy_err_drop
;
4019 dst
->mpdu_errs
= src
->mpdu_errs
;
static void
ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
			   struct ath11k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = src->vdev_id;
	dst->beacon_snr = src->beacon_snr;
	dst->data_snr = src->data_snr;
	dst->num_rx_frames = src->num_rx_frames;
	dst->num_rts_fail = src->num_rts_fail;
	dst->num_rts_success = src->num_rts_success;
	dst->num_rx_err = src->num_rx_err;
	dst->num_rx_discard = src->num_rx_discard;
	dst->num_tx_not_acked = src->num_tx_not_acked;

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] = src->num_tx_frames[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] = src->tx_rate_history[i];

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
}

static void
ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
			  struct ath11k_fw_stats_bcn *dst)
{
	dst->vdev_id = src->vdev_id;
	dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
	dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
			     struct ath11k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_stats_event *ev;
	const void *data;
	u32 len = skb->len;
	int i, ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_STATS_EVENT];
	data = tb[WMI_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		ath11k_warn(ab, "failed to fetch update stats ev");
		kfree(tb);
		return -EPROTO;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
		   ev->pdev_id,
		   ev->num_pdev_stats, ev->num_vdev_stats,
		   ev->num_bcn_stats);

	stats->pdev_id = ev->pdev_id;
	stats->stats_id = 0;

	for (i = 0; i < ev->num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath11k_fw_stats_pdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_PDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < ev->num_vdev_stats; i++) {
		const struct wmi_vdev_stats *src;
		struct ath11k_fw_stats_vdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_VDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < ev->num_bcn_stats; i++) {
		const struct wmi_bcn_stats *src;
		struct ath11k_fw_stats_bcn *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_BCN_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_bcn_stats(src, dst);
		list_add_tail(&dst->list, &stats->bcn);
	}

	kfree(tb);
	return 0;
}
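
/* The WMI_TAG_ARRAY_BYTE payload parsed above is consumed as a packed
 * sequence of records in the order announced by the fixed event header:
 * num_pdev_stats wmi_pdev_stats records, then num_vdev_stats
 * wmi_vdev_stats records, then num_bcn_stats wmi_bcn_stats records.
 * Each loop advances 'data' and shrinks 'len' by sizeof(*src), so a short
 * buffer is caught by the "len < sizeof(*src)" checks before any copy.
 */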
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
	struct ath11k_fw_stats_vdev *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}

static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
{
	struct ath11k_fw_stats_bcn *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}
static void
ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				   char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath11k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", pdev->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", pdev->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", pdev->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", pdev->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", pdev->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", pdev->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", pdev->phy_err_count);

	*length = len;
}
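
/* The fill helpers above and below follow one pattern: 'len' tracks how much
 * of the caller's buffer is already used and each line is emitted with
 * scnprintf(buf + len, buf_len - len, ...). scnprintf() returns the number
 * of characters actually written (excluding the trailing NUL) and never
 * writes past the given size, so once the buffer is full the remaining
 * calls simply become no-ops instead of overflowing.
 */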
static void
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", pdev->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", pdev->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", pdev->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", pdev->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", pdev->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", pdev->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requed", pdev->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Excessive retries", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "HW rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Dropped due to SW retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Stateless TIDs alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "MPDU is more than txop limit", pdev->txop_ovf);

	*length = len;
}
static void
ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 pdev->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", pdev->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", pdev->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", pdev->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", pdev->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", pdev->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", pdev->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", pdev->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", pdev->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSUs", pdev->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", pdev->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", pdev->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);

	*length = len;
}
static void
ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
			      const struct ath11k_fw_stats_vdev *vdev,
			      char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
	u8 *vif_macaddr;
	int i;

	/* VDEV stats has all the active VDEVs of other PDEVs as well,
	 * ignoring those not part of requested PDEV
	 */
	if (!arvif)
		return;

	vif_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", vdev->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vif_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "beacon snr", vdev->beacon_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "data snr", vdev->data_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx frames", vdev->num_rx_frames);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts fail", vdev->num_rts_fail);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts success", vdev->num_rts_success);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx err", vdev->num_rx_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx discard", vdev->num_rx_discard);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num tx not acked", vdev->num_tx_not_acked);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames", i,
				 vdev->num_tx_frames[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames retries", i,
				 vdev->num_tx_frames_retries[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames failures", i,
				 vdev->num_tx_frames_failures[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] 0x%08x\n",
				 "tx rate history", i,
				 vdev->tx_rate_history[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "beacon rssi history", i,
				 vdev->beacon_rssi_history[i]);

	len += scnprintf(buf + len, buf_len - len, "\n");
	*length = len;
}
static void
ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
			     const struct ath11k_fw_stats_bcn *bcn,
			     char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
	u8 *vdev_macaddr;

	if (!arvif) {
		ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
			    bcn->vdev_id);
		return;
	}

	vdev_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", bcn->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vdev_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "================");
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);

	len += scnprintf(buf + len, buf_len - len, "\n");
	*length = len;
}
void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
			      struct ath11k_fw_stats *fw_stats,
			      u32 stats_id, char *buf)
{
	u32 len = 0;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	const struct ath11k_fw_stats_pdev *pdev;
	const struct ath11k_fw_stats_vdev *vdev;
	const struct ath11k_fw_stats_bcn *bcn;
	size_t num_bcn;

	spin_lock_bh(&ar->data_lock);

	if (stats_id == WMI_REQUEST_PDEV_STAT) {
		pdev = list_first_entry_or_null(&fw_stats->pdevs,
						struct ath11k_fw_stats_pdev, list);
		if (!pdev) {
			ath11k_warn(ar->ab, "failed to get pdev stats\n");
			goto unlock;
		}

		ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_VDEV_STAT) {
		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s\n",
				 "ath11k VDEV stats");
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "=================");

		list_for_each_entry(vdev, &fw_stats->vdevs, list)
			ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_BCN_STAT) {
		num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);

		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
				 "ath11k Beacon stats", num_bcn);
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "===================");

		list_for_each_entry(bcn, &fw_stats->bcn, list)
			ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
	}

unlock:
	spin_unlock_bh(&ar->data_lock);

	if (len >= buf_len)
		buf[len - 1] = 0;
	else
		buf[len] = 0;
}
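
/* Illustrative only (not a caller in this file): a debugfs reader is
 * expected to hand in a buffer of ATH11K_FW_STATS_BUF_SIZE and pick one
 * stats_id per call, roughly:
 *
 *	buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
 *	if (buf)
 *		ath11k_wmi_fw_stats_fill(ar, &fw_stats,
 *					 WMI_REQUEST_PDEV_STAT, buf);
 */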
static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static bool ath11k_reg_is_world_alpha(char *alpha)
{
	return alpha[0] == '0' && alpha[1] == '0';
}
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct cur_regulatory_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx;
	struct ath11k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
	if (ret) {
		ath11k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	if (pdev_idx >= ab->num_radios)
		goto fallback;

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * fw is considered to be received due to user requests.
		 *
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		ieee80211_queue_work(ar->hw, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar is not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * NULL pointer handling will be taken care by kfree itself.
		 */
		kfree(ab->default_regd[pdev_idx]);
		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again) if fw has succeeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
mem_free:
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		kfree(reg_info);
	}
	return ret;
}
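
/* Note on the two regdomain slots used above: before mac80211 registration
 * the event result is parked in ab->default_regd[pdev_idx] and applied at
 * registration time; once ATH11K_FLAG_REGISTERED is set the result goes to
 * ab->new_regd[pdev_idx] and regd_update_work pushes it to mac80211.
 * An alpha2 of "00" is treated as the world-roaming domain, which is why
 * ath11k_reg_is_world_alpha() gates the intersect decision.
 */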
static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct wmi_tlv_rdy_parse *rdy_parse = data;
	struct wmi_ready_event *fixed_param;
	struct wmi_mac_addr *addr_list;
	struct ath11k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		fixed_param = (struct wmi_ready_event *)ptr;
		ab->wlan_init_status = fixed_param->status;
		rdy_parse->num_extra_mac_addr = fixed_param->num_extra_mac_addr;

		ether_addr_copy(ab->mac_addr, fixed_param->mac_addr.addr);
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct wmi_mac_addr *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}

static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_tlv_rdy_parse rdy_parse = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_rdy_parse, &rdy_parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	complete(&ab->wmi_ab.unified_ready);

	return 0;
}
static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_event peer_del_resp;

	if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
		ath11k_warn(ab, "failed to extract peer delete resp");
		return;
	}

	/* TODO: Do we need to validate whether ath11k_peer_find() returns NULL,
	 * and why is this needed when there is an HTT event for peer delete?
	 */
}
static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	switch (vdev_resp_status) {
	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
		return "invalid vdev id";
	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
		return "not supported";
	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
		return "dfs violation";
	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
		return "invalid regdomain";
	default:
		return "unknown";
	}
}

static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_vdev_start_resp_event vdev_start_resp;
	struct ath11k *ar;
	u32 status;

	if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
		ath11k_warn(ab, "failed to extract vdev start resp");
		return;
	}

	ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
			    vdev_start_resp.vdev_id);
		return;
	}

	ar->last_wmi_vdev_start_status = 0;

	status = vdev_start_resp.status;

	if (WARN_ON_ONCE(status)) {
		ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
			    status, ath11k_wmi_vdev_resp_print(status));
		ar->last_wmi_vdev_start_status = status;
	}

	complete(&ar->vdev_setup_done);

	ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
		   vdev_start_resp.vdev_id);
}
static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	u32 vdev_id, tx_status;

	if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
					 &vdev_id, &tx_status) != 0) {
		ath11k_warn(ab, "failed to extract bcn tx status");
		return;
	}
}

static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	u32 vdev_id = 0;

	if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
		ath11k_warn(ab, "failed to extract vdev stopped event");
		return;
	}

	ar = ath11k_mac_get_ar_vdev_stop_status(ab, vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
			    vdev_id);
		return;
	}

	complete(&ar->vdev_setup_done);

	ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct mgmt_rx_event_params rx_ev = {0};
	struct ath11k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;

	if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath11k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		return;
	}

	if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		return;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		dev_kfree_skb(skb);
		return;
	}

	if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5GHz\n");

	sband = &ar->mac.sbands[status->band];

	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
						      status->band);
	status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	/* TODO: Pending handle beacon implementation
	 * if (ieee80211_is_beacon(hdr->frame_control))
	 *	ath11k_mac_handle_beacon(ar, skb);
	 */

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ar->hw, skb);
}
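
/* The firmware reports management frame strength as an SNR, so the handler
 * above derives the dBm value mac80211 expects by adding a fixed noise
 * floor reference:
 *
 *	status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
 *
 * e.g. an SNR of 40 dB on top of a (typical) -95 dBm reference is reported
 * as -55 dBm.
 */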
static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
	struct ath11k *ar;

	if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
		ath11k_warn(ab, "failed to extract mgmt tx compl event");
		return;
	}

	ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
			    tx_compl_param.pdev_id);
		return;
	}

	wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
				 tx_compl_param.status);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
		   tx_compl_param.status);
}
static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
						  u32 vdev_id)
{
	struct ath11k_pdev *pdev;
	struct ath11k *ar;
	int i;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar) {
			ar = pdev->ar;

			spin_lock_bh(&ar->data_lock);
			if (ar->scan.state == ATH11K_SCAN_ABORTING &&
			    ar->scan.vdev_id == vdev_id) {
				spin_unlock_bh(&ar->data_lock);
				return ar;
			}
			spin_unlock_bh(&ar->data_lock);
		}
	}

	return NULL;
}

static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	struct wmi_scan_event scan_ev = {0};

	if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath11k_warn(ab, "failed to extract scan event");
		return;
	}

	/* In case the scan was cancelled, ex. during interface teardown,
	 * the interface will not be found in active interfaces.
	 * Rather, in such scenarios, iterate over the active pdev's to
	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
	 * aborting scan's vdev id matches this event info.
	 */
	if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
	    scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
		ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
	else
		ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);

	if (!ar) {
		ath11k_warn(ab, "Received scan event for unknown vdev");
		return;
	}

	spin_lock_bh(&ar->data_lock);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
		   scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
		   scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
		   ath11k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (scan_ev.event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath11k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath11k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath11k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath11k_warn(ab, "received scan start failure event\n");
		ath11k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);
}
static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_arg arg = {};
	struct ieee80211_sta *sta;
	struct ath11k_peer *peer;
	struct ath11k *ar;

	if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
		ath11k_warn(ab, "failed to extract peer sta kickout event");
		return;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
	if (!peer) {
		ath11k_warn(ab, "peer not found %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
			    peer->vdev_id);
		goto exit;
	}

	sta = ieee80211_find_sta_by_ifaddr(ar->hw,
					   arg.mac_addr, NULL);
	if (!sta) {
		ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
		   arg.mac_addr);

	ieee80211_report_low_ack(sta, 10);

exit:
	spin_unlock_bh(&ab->base_lock);
}
static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_roam_event roam_ev = {};
	struct ath11k *ar;

	if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
		ath11k_warn(ab, "failed to extract roam event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
		   roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);

	ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in roam ev %d",
			    roam_ev.vdev_id);
		return;
	}

	if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
		ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);

	switch (roam_ev.reason) {
	case WMI_ROAM_REASON_BEACON_MISS:
		/* TODO: Pending beacon miss and connection_loss_work
		 * implementation
		 * ath11k_mac_handle_beacon_miss(ar, vdev_id);
		 */
		break;
	case WMI_ROAM_REASON_BETTER_AP:
	case WMI_ROAM_REASON_LOW_RSSI:
	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
	case WMI_ROAM_REASON_HO_FAILED:
		ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);
		break;
	}
}
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	struct wmi_chan_info_event ch_info_ev = {0};
	struct survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;

	if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract chan info event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
		   ch_info_ev.mac_clk_mhz);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
		return;
	}

	ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in chan info ev %d",
			    ch_info_ev.vdev_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &ar->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY;
		survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
		survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
	}

exit:
	spin_unlock_bh(&ar->data_lock);
}
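
/* Survey times above come from free-running hardware counters:
 * counts / counter_rate = elapsed time. When the event carries mac_clk_mhz
 * the divisor becomes mac_clk_mhz * 1000 (counts per millisecond), so, for
 * example, a cycle_count of 1400000 with a 140 MHz MAC clock yields
 * survey->time = 10, which cfg80211 reports as milliseconds.
 */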
static void
ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
	struct survey_info *survey;
	struct ath11k *ar;
	u32 cc_freq_hz = ab->cc_freq_hz;
	u64 busy, total, tx, rx, rx_bss;
	int idx;

	if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract pdev bss chan info event");
		return;
	}

	busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
	       bss_ch_info_ev.rx_clear_count_low;

	total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
		bss_ch_info_ev.cycle_count_low;

	tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
	     bss_ch_info_ev.tx_cycle_count_low;

	rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
	     bss_ch_info_ev.rx_cycle_count_low;

	rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
		 bss_ch_info_ev.rx_bss_cycle_count_low;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
		   bss_ch_info_ev.noise_floor, busy, total,
		   tx, rx, rx_bss);

	ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
			    bss_ch_info_ev.pdev_id);
		return;
	}

	spin_lock_bh(&ar->data_lock);
	idx = freq_to_idx(ar, bss_ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
			    bss_ch_info_ev.freq, idx);
		goto exit;
	}

	survey = &ar->survey[idx];

	survey->noise     = bss_ch_info_ev.noise_floor;
	survey->time      = div_u64(total, cc_freq_hz);
	survey->time_busy = div_u64(busy, cc_freq_hz);
	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
	survey->time_tx   = div_u64(tx, cc_freq_hz);
	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
			     SURVEY_INFO_TIME |
			     SURVEY_INFO_TIME_BUSY |
			     SURVEY_INFO_TIME_RX |
			     SURVEY_INFO_TIME_TX);
exit:
	spin_unlock_bh(&ar->data_lock);
	complete(&ar->bss_survey_done);
}
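
/* The firmware splits each 64-bit busy/cycle counter into two 32-bit TLV
 * fields; the handler above stitches them back together with the usual
 *
 *	val = (u64)hi << 32 | lo;
 *
 * pattern and then applies the same rx_clear/cycle accounting as the
 * per-scan chan info path to fill the cfg80211 survey entry.
 */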
static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
						struct sk_buff *skb)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
	struct ath11k *ar;

	if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
		ath11k_warn(ab, "failed to extract install key compl event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
		   install_key_compl.key_idx, install_key_compl.key_flags,
		   install_key_compl.macaddr, install_key_compl.status);

	ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
			    install_key_compl.vdev_id);
		return;
	}

	ar->install_key_status = 0;

	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		ath11k_warn(ab, "install key failed for %pM status %d\n",
			    install_key_compl.macaddr, install_key_compl.status);
		ar->install_key_status = install_key_compl.status;
	}

	complete(&ar->install_key_done);
}
static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_service_available_event *ev;
	int ret;
	int i, j;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch svc available ev");
		kfree(tb);
		return;
	}

	/* TODO: Use wmi_service_segment_offset information to get the service
	 * especially when more services are advertised in multiple service
	 * available events.
	 */
	for (i = 0, j = WMI_MAX_SERVICE;
	     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
	     i++) {
		do {
			if (ev->wmi_service_segment_bitmap[i] &
			    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
				set_bit(j, ab->wmi_ab.svc_map);
		} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
		   ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
		   ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);

	kfree(tb);
}
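
/* The loop above ORs the extended service bitmap into ab->wmi_ab.svc_map.
 * Extended service ids continue from WMI_MAX_SERVICE upward: word i of
 * wmi_service_segment_bitmap[] carries the next
 * WMI_AVAIL_SERVICE_BITS_IN_SIZE32 (32) ids, and bit position
 * j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32 selects the id within that word,
 * exactly as the BIT() test does.
 */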
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
	struct ath11k *ar;

	if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
		ath11k_warn(ab, "failed to extract peer assoc conf event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "peer assoc conf ev vdev id %d macaddr %pM\n",
		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);

	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
			    peer_assoc_conf.vdev_id);
		return;
	}

	complete(&ar->peer_assoc_done);
}
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	ath11k_debug_fw_stats_process(ab, skb);
}

/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
 * is not part of BDF CTL (Conformance Test Limits) table entries.
 */
static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
						 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev ctl failsafe check ev status %d\n",
		   ev->ctl_failsafe_status);

	/* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
	 * to 10 dBm else the CTL power entry in the BDF would be picked up.
	 */
	if (ev->ctl_failsafe_status != 0)
		ath11k_warn(ab, "pdev ctl failsafe failure status %d",
			    ev->ctl_failsafe_status);

	kfree(tb);
}
static void
ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
					  const struct wmi_pdev_csa_switch_ev *ev,
					  const u32 *vdev_ids)
{
	int i;
	struct ath11k_vif *arvif;

	/* Finish CSA once the switch count reaches zero */
	if (ev->current_switch_count)
		return;

	for (i = 0; i < ev->num_vdevs; i++) {
		arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
		if (!arvif) {
			ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
				    vdev_ids[i]);
			continue;
		}

		if (arvif->is_up && arvif->vif->csa_active)
			ieee80211_csa_finish(arvif->vif);
	}
}

static void
ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_csa_switch_ev *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}
static void
ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_radar_ev *ev;
	struct ath11k *ar;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
	if (!ar) {
		ath11k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	if (ar->dfs_block_radar_events)
		ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ar->hw);

exit:
	kfree(tb);
}
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath11k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath11k_service_ready_ext_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		ath11k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath11k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath11k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath11k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath11k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath11k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath11k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath11k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath11k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath11k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath11k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath11k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath11k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath11k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	/* add Unsupported events here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_VDEV_DELETE_RESP_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_TWT_ENABLE_EVENTID:
	case WMI_TWT_DISABLE_EVENTID:
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	/* TODO: Add remaining events */
	default:
		ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
					   u32 pdev_idx)
{
	int status;
	u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };

	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}
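
/* svc_id[] above is indexed by pdev_idx: pdev 0 connects to
 * ATH11K_HTC_SVC_ID_WMI_CONTROL while pdevs 1 and 2 use the per-MAC
 * WMI_CONTROL_MAC1/MAC2 services. All endpoints share the same ep_ops;
 * only the service id (and therefore the HTC endpoint) differs.
 */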
static int ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
					 struct wmi_unit_test_cmd ut_cmd,
					 u32 *test_args)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	arg_len = sizeof(u32) * ut_cmd.num_args;
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = ut_cmd.vdev_id;
	cmd->module_id = ut_cmd.module_id;
	cmd->num_args = ut_cmd.num_args;
	cmd->diag_token = ut_cmd.diag_token;

	ptr = skb->data + sizeof(ut_cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, arg_len);

	ptr += TLV_HDR_SIZE;

	ut_cmd_args = ptr;
	for (i = 0; i < ut_cmd.num_args; i++)
		ut_cmd_args[i] = test_args[i];

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
		return ret;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
		   cmd->module_id, cmd->vdev_id, cmd->num_args,
		   cmd->diag_token);

	return ret;
}
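
/* Command buffer layout built above, back to back:
 *
 *	[wmi_unit_test_cmd   (tag WMI_TAG_UNIT_TEST_CMD)]
 *	[TLV header          (tag WMI_TAG_ARRAY_UINT32, len = arg_len)]
 *	[num_args u32 test arguments]
 *
 * which is why buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE.
 */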
int ath11k_wmi_simulate_radar(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	u32 dfs_args[DFS_MAX_TEST_ARGS];
	struct wmi_unit_test_cmd wmi_ut;
	bool arvif_found = false;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
			arvif_found = true;
			break;
		}
	}

	if (!arvif_found)
		return -EINVAL;

	dfs_args[DFS_TEST_CMDID] = 0;
	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
	/* Currently we could pass segment_id (b0 - b1), chirp (b2) and
	 * freq offset (b3 - b10) to the unit test. For simulation
	 * purposes this can be set to 0, which is valid.
	 */
	dfs_args[DFS_TEST_RADAR_PARAM] = 0;

	wmi_ut.vdev_id = arvif->vdev_id;
	wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
	wmi_ut.num_args = DFS_MAX_TEST_ARGS;
	wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;

	ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");

	return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
int ath11k_wmi_connect(struct ath11k_base *ab)
{
	u32 i;
	u8 wmi_ep_count;

	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > MAX_RADIOS)
		return -EINVAL;

	for (i = 0; i < wmi_ep_count; i++)
		ath11k_connect_pdev_htc_service(ab, i);

	return 0;
}

static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}

int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
			   u8 pdev_id)
{
	struct ath11k_pdev_wmi *wmi_handle;

	if (pdev_id >= MAX_RADIOS)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];

	wmi_handle->wmi_ab = &ab->wmi_ab;

	/* TODO: Init remaining resource specific to pdev */

	return 0;
}

int ath11k_wmi_attach(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_wmi_pdev_attach(ab, 0);
	if (ret)
		return ret;

	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;

	/* TODO: Init remaining wmi soc resources required */
	init_completion(&ab->wmi_ab.service_ready);
	init_completion(&ab->wmi_ab.unified_ready);

	return 0;
}

void ath11k_wmi_detach(struct ath11k_base *ab)
{
	int i;

	/* TODO: Deinit wmi resource specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath11k_wmi_pdev_detach(ab, i);
}