// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include <linux/completion.h>
#include <linux/if_ether.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
struct wmi_tlv_policy {
	size_t min_len;
};

struct wmi_tlv_svc_ready_parse {
	bool wmi_svc_bitmap_done;
};

struct wmi_tlv_dma_ring_caps_parse {
	struct wmi_dma_ring_capabilities *dma_ring_caps;
};

struct wmi_tlv_svc_rdy_ext_parse {
	struct ath11k_service_ext_param param;
	struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
	struct wmi_hw_mode_capabilities *hw_mode_caps;
	struct wmi_hw_mode_capabilities pref_hw_mode_caps;
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
	struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
	u32 n_ext_hal_reg_caps;
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	bool ext_hal_reg_done;
	bool mac_phy_chainmask_combo_done;
	bool mac_phy_chainmask_cap_done;
	bool oem_dma_ring_cap_done;
	bool dma_ring_cap_done;
};

struct wmi_tlv_svc_rdy_ext2_parse {
	struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
	bool dma_ring_cap_done;
};

struct wmi_tlv_rdy_parse {
	u32 num_extra_mac_addr;
};

struct wmi_tlv_dma_buf_release_parse {
	struct ath11k_wmi_dma_buf_release_fixed_param fixed;
	struct wmi_dma_buf_release_entry *buf_entry;
	struct wmi_dma_buf_release_meta_data *meta_data;
};
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
	[WMI_TAG_ARRAY_UINT32]
	[WMI_TAG_SERVICE_READY_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_event) },
	[WMI_TAG_SERVICE_READY_EXT_EVENT]
		= { .min_len = sizeof(struct wmi_service_ready_ext_event) },
	[WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
		= { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
	[WMI_TAG_SOC_HAL_REG_CAPABILITIES]
		= { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
	[WMI_TAG_VDEV_START_RESPONSE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
	[WMI_TAG_PEER_DELETE_RESP_EVENT]
		= { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
	[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
		= { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
	[WMI_TAG_VDEV_STOPPED_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_stopped_event) },
	[WMI_TAG_REG_CHAN_LIST_CC_EVENT]
		= { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
		= { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
	[WMI_TAG_MGMT_TX_COMPL_EVENT]
		= { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
		= { .min_len = sizeof(struct wmi_scan_event) },
	[WMI_TAG_PEER_STA_KICKOUT_EVENT]
		= { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
		= { .min_len = sizeof(struct wmi_roam_event) },
	[WMI_TAG_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_chan_info_event) },
	[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
	[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
		= { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
	[WMI_TAG_READY_EVENT] = {
		.min_len = sizeof(struct wmi_ready_event_min) },
	[WMI_TAG_SERVICE_AVAILABLE_EVENT]
		= { .min_len = sizeof(struct wmi_service_available_event) },
	[WMI_TAG_PEER_ASSOC_CONF_EVENT]
		= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
	[WMI_TAG_STATS_EVENT]
		= { .min_len = sizeof(struct wmi_stats_event) },
	[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
		= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
	[WMI_TAG_HOST_SWFDA_EVENT] = {
		.min_len = sizeof(struct wmi_fils_discovery_event) },
	[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
		.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
	[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
		.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
};
#define PRIMAP(_hw_mode_) \
	[_hw_mode_] = _hw_mode_##_PRI

static const int ath11k_hw_mode_pri_map[] = {
	PRIMAP(WMI_HOST_HW_MODE_SINGLE),
	PRIMAP(WMI_HOST_HW_MODE_DBS),
	PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
	PRIMAP(WMI_HOST_HW_MODE_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
	PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
	PRIMAP(WMI_HOST_HW_MODE_MAX),
};
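
/* For reference: each PRIMAP() entry expands via token pasting, e.g.
 * PRIMAP(WMI_HOST_HW_MODE_DBS) becomes
 *	[WMI_HOST_HW_MODE_DBS] = WMI_HOST_HW_MODE_DBS_PRI,
 * so the table maps a hw mode id to its priority value.
 */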
ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
		    int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
				const void *ptr, void *data),
	const void *begin = ptr;
	const struct wmi_tlv *tlv;
	u16 tlv_tag, tlv_len;

		if (len < sizeof(*tlv)) {
			ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));

		tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
		tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
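
		/* Assumed layout: the 32-bit TLV header packs the tag and the
		 * length as the WMI_TLV_TAG and WMI_TLV_LEN bit-fields from
		 * the WMI headers, so a plain FIELD_GET() is enough to split
		 * it into tlv_tag/tlv_len here.
		 */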
			ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%zu bytes left, %hhu expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);

		if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
		    wmi_tlv_policies[tlv_tag].min_len &&
		    wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
			ath11k_err(ab, "wmi tlv parse failure of tag %hhu at byte %zd (%hhu bytes is less than min length %zu)\n",
				   tlv_tag, ptr - begin, tlv_len,
				   wmi_tlv_policies[tlv_tag].min_len);

		ret = iter(ab, tlv_tag, tlv_len, ptr, data);

static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
				     const void *ptr, void *data)
	const void **tb = data;

	if (tag < WMI_TAG_MAX)

static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
				const void *ptr, size_t len)
	return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,

ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
			   size_t len, gfp_t gfp)
	tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
		return ERR_PTR(-ENOMEM);

	ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);

static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_cmd_hdr *cmd_hdr;

	if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)

	cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	cmd_hdr->cmd_id = cmd;

	memset(skb_cb, 0, sizeof(*skb_cb));
	ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);

	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
	struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
	int ret = -EOPNOTSUPP;

	wait_event_timeout(wmi_sc->tx_credits_wq, ({
		ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);

		if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))

	}), WMI_SEND_TIMEOUT_HZ);

		ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
				     struct ath11k_service_ext_param *param)
	const struct wmi_service_ready_ext_event *ev = ptr;

	/* Move this to host based bitmap */
	param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
	param->default_fw_config_bits = ev->default_fw_config_bits;
	param->he_cap_info = ev->he_cap_info;
	param->mpdu_density = ev->mpdu_density;
	param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
	memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));

ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
				      struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
				      struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
				      struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
				      struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
				      u8 hw_mode_id, u8 phy_id,
				      struct ath11k_pdev *pdev)
	struct wmi_mac_phy_capabilities *mac_phy_caps;
	struct ath11k_band_cap *cap_band;
	struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
	u32 hw_idx, phy_idx = 0;

	if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)

	for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
		if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)

		phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;

	if (hw_idx == hw_caps->num_hw_modes)

	if (phy_id >= hal_reg_caps->num_phy)

	mac_phy_caps = wmi_mac_phy_caps + phy_idx;

	pdev->pdev_id = mac_phy_caps->pdev_id;
	pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
	pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;

	/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
	 * band to band for a single radio, need to see how this should be
	 */
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
	} else if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
		pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
		pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
		pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;

	/* tx/rx chainmask reported from fw depends on the actual hw chains used,
	 * For example, for 4x4 capable macphys, first 4 chains can be used for first
	 * mac and the remaining 4 chains can be used for the second mac or vice-versa.
	 * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
	 * will be advertised for second mac or vice-versa. Compute the shift value
	 * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
	 */
	pdev_cap->tx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
	pdev_cap->rx_chain_mask_shift =
		find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
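
	/* Worked example (assumption): if the second mac of a 4x4 chip
	 * advertises chainmask 0xf0, find_first_bit() yields a shift of 4,
	 * so rate calculations start from chain 4 instead of chain 0.
	 */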
	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
		       sizeof(struct ath11k_ppe_threshold));

	if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
		cap_band->phy_id = mac_phy_caps->phy_id;
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));

		cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
		cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
		cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
		cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
		cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
		cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
		memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
		       sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
		memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
		       sizeof(struct ath11k_ppe_threshold));

ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
				struct wmi_soc_hal_reg_capabilities *reg_caps,
				struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
				struct ath11k_hal_reg_capabilities_ext *param)
	struct wmi_hal_reg_capabilities_ext *ext_reg_cap;

	if (!reg_caps || !wmi_ext_reg_cap)

	if (phy_idx >= reg_caps->num_phy)

	ext_reg_cap = &wmi_ext_reg_cap[phy_idx];

	param->phy_id = ext_reg_cap->phy_id;
	param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
	param->eeprom_reg_domain_ext =
		ext_reg_cap->eeprom_reg_domain_ext;
	param->regcap1 = ext_reg_cap->regcap1;
	param->regcap2 = ext_reg_cap->regcap2;
	/* check if param->wireless_mode is needed */
	param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
	param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
	param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
	param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;

static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
					 struct ath11k_targ_cap *cap)
	const struct wmi_service_ready_event *ev = evt_buf;

		ath11k_err(ab, "%s: failed by NULL param\n",

	cap->phy_capability = ev->phy_capability;
	cap->max_frag_entry = ev->max_frag_entry;
	cap->num_rf_chains = ev->num_rf_chains;
	cap->ht_cap_info = ev->ht_cap_info;
	cap->vht_cap_info = ev->vht_cap_info;
	cap->vht_supp_mcs = ev->vht_supp_mcs;
	cap->hw_min_tx_power = ev->hw_min_tx_power;
	cap->hw_max_tx_power = ev->hw_max_tx_power;
	cap->sys_cap_info = ev->sys_cap_info;
	cap->min_pkt_size_enable = ev->min_pkt_size_enable;
	cap->max_bcn_ie_size = ev->max_bcn_ie_size;
	cap->max_num_scan_channels = ev->max_num_scan_channels;
	cap->max_supported_macs = ev->max_supported_macs;
	cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
	cap->txrx_chainmask = ev->txrx_chainmask;
	cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
	cap->num_msdu_desc = ev->num_msdu_desc;
/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
 * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
 */
static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
					   const u32 *wmi_svc_bm)
	for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
			if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
				set_bit(j, wmi->wmi_ab->svc_map);
		} while (++j % WMI_SERVICE_BITS_IN_SIZE32);
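
	/* For example (assuming WMI_SERVICE_BITS_IN_SIZE32 is 32): service
	 * bit j is read from bit (j % 32) of firmware word i and then
	 * flattened into the linear svc_map via set_bit(j, ...).
	 */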
static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
					const void *ptr, void *data)
	struct wmi_tlv_svc_ready_parse *svc_ready = data;
	struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];

	case WMI_TAG_SERVICE_READY_EVENT:
		if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))

	case WMI_TAG_ARRAY_UINT32:
		if (!svc_ready->wmi_svc_bitmap_done) {
			expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
			if (len < expect_len) {
				ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",

			ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);

			svc_ready->wmi_svc_bitmap_done = true;

static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
	struct wmi_tlv_svc_ready_parse svc_ready = { };

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_svc_rdy_parse,

		ath11k_warn(ab, "failed to parse tlv %d\n", ret);

struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
	struct ath11k_base *ab = wmi_sc->ab;
	u32 round_len = roundup(len, 4);
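
	/* Note: WMI TLV payloads are expected to be 4-byte aligned, which is
	 * why the requested length is rounded up here; the padding is zeroed
	 * together with the payload by the memset() below.
	 */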
	skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);

	skb_reserve(skb, WMI_SKB_HEADROOM);
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath11k_warn(ab, "unaligned WMI skb data\n");

	skb_put(skb, round_len);
	memset(skb->data, 0, round_len);

int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
			 struct sk_buff *frame)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_mgmt_send_cmd *cmd;
	struct wmi_tlv *frame_tlv;

	buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
		  frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
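
	/* Note: only up to WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline
	 * into the command buffer below, while cmd->frame_len carries the
	 * full frame length and paddr_lo/hi carry the frame's DMA address
	 * (presumably so firmware can fetch the remainder itself).
	 */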
	len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_mgmt_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->desc_id = buf_id;

	cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
	cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
	cmd->frame_len = frame->len;
	cmd->buf_len = buf_len;
	cmd->tx_params_valid = 0;

	frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
			    FIELD_PREP(WMI_TLV_LEN, buf_len);

	memcpy(frame_tlv->value, frame->data, buf_len);

	ath11k_ce_byte_swap(frame_tlv->value, buf_len);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
			    "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");

int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
			   struct vdev_create_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_create_cmd *cmd;
	struct wmi_vdev_txrx_streams *txrx_streams;

	/* It can be optimized by sending tx/rx chain configuration
	 * only for supported bands instead of always sending it for
	 */
	len = sizeof(*cmd) + TLV_HDR_SIZE +
		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->if_id;
	cmd->vdev_type = param->type;
	cmd->vdev_subtype = param->subtype;
	cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
	cmd->pdev_id = param->pdev_id;
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ptr = skb->data + sizeof(*cmd);
	len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);

	len = sizeof(*txrx_streams);
	txrx_streams->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
	txrx_streams->supported_tx_streams =
		param->chains[NL80211_BAND_2GHZ].tx;
	txrx_streams->supported_rx_streams =
		param->chains[NL80211_BAND_2GHZ].rx;

	txrx_streams->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
		FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
	txrx_streams->supported_tx_streams =
		param->chains[NL80211_BAND_5GHZ].tx;
	txrx_streams->supported_rx_streams =
		param->chains[NL80211_BAND_5GHZ].rx;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
			    "failed to submit WMI_VDEV_CREATE_CMDID\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
		   param->if_id, param->type, param->subtype,
		   macaddr, param->pdev_id);

int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_delete_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
		ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);

int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_stop_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
		ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);

int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_down_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_vdev_down_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
		ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);

static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
				       struct wmi_vdev_start_req_arg *arg)
	memset(chan, 0, sizeof(*chan));

	chan->mhz = arg->channel.freq;
	chan->band_center_freq1 = arg->channel.band_center_freq1;
	if (arg->channel.mode == MODE_11AC_VHT80_80)
		chan->band_center_freq2 = arg->channel.band_center_freq2;
	else
		chan->band_center_freq2 = 0;

	chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
	if (arg->channel.passive)
		chan->info |= WMI_CHAN_INFO_PASSIVE;
	if (arg->channel.allow_ibss)
		chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
	if (arg->channel.allow_ht)
		chan->info |= WMI_CHAN_INFO_ALLOW_HT;
	if (arg->channel.allow_vht)
		chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
	if (arg->channel.allow_he)
		chan->info |= WMI_CHAN_INFO_ALLOW_HE;
	if (arg->channel.ht40plus)
		chan->info |= WMI_CHAN_INFO_HT40_PLUS;
	if (arg->channel.chan_radar)
		chan->info |= WMI_CHAN_INFO_DFS;
	if (arg->channel.freq2_radar)
		chan->info |= WMI_CHAN_INFO_DFS_FREQ2;

	chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
				      arg->channel.max_power) |
			   FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
				      arg->channel.max_reg_power);

	chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
				      arg->channel.max_antenna_gain) |
			   FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
				      arg->channel.max_power);
int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_start_request_cmd *cmd;
	struct wmi_channel *chan;

	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))

	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_START_REQUEST_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	cmd->beacon_interval = arg->bcn_intval;
	cmd->bcn_tx_rate = arg->bcn_tx_rate;
	cmd->dtim_period = arg->dtim_period;
	cmd->num_noa_descriptors = arg->num_noa_descriptors;
	cmd->preferred_rx_streams = arg->pref_rx_streams;
	cmd->preferred_tx_streams = arg->pref_tx_streams;
	cmd->cac_duration_ms = arg->cac_duration_ms;
	cmd->regdomain = arg->regdomain;
	cmd->he_ops = arg->he_ops;

	cmd->ssid.ssid_len = arg->ssid_len;
	memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);

	if (arg->hidden_ssid)
		cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		cmd->flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;

	ptr = skb->data + sizeof(*cmd);

	ath11k_wmi_put_wmi_channel(chan, arg);

	chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
			   FIELD_PREP(WMI_TLV_LEN,
				      sizeof(*chan) - TLV_HDR_SIZE);
	ptr += sizeof(*chan);

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, 0);

	/* Note: This is a nested TLV containing:
	 * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
	 */

		ret = ath11k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_RESTART_REQUEST_CMDID);
		ret = ath11k_wmi_cmd_send(wmi, skb,
					  WMI_VDEV_START_REQUEST_CMDID);
		ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
			    restart ? "restart" : "start");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
		   restart ? "restart" : "start", arg->vdev_id,
		   arg->channel.freq, arg->channel.mode);

int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_up_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_vdev_up_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->vdev_assoc_id = aid;

	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
		ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
				    struct peer_create_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_create_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
	cmd->peer_type = param->peer_type;
	cmd->vdev_id = param->vdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
		ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI peer create vdev_id %d peer_addr %pM\n",
		   param->vdev_id, param->peer_addr);

int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
				    const u8 *peer_addr, u8 vdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_delete_cmd *cmd;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = vdev_id;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI peer delete vdev_id %d peer_addr %pM\n",

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");

int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
				       struct pdev_set_regdomain_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->reg_domain = param->current_rd_in_use;
	cmd->reg_domain_2g = param->current_rd_2g;
	cmd->reg_domain_5g = param->current_rd_5g;
	cmd->conformance_test_limit_2g = param->ctl_2g;
	cmd->conformance_test_limit_5g = param->ctl_5g;
	cmd->dfs_domain = param->dfs_domain;
	cmd->pdev_id = param->pdev_id;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
		   param->current_rd_in_use, param->current_rd_2g,
		   param->current_rd_5g, param->dfs_domain, param->pdev_id);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
			    "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");

int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
			      u32 vdev_id, u32 param_id, u32 param_val)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->vdev_id = vdev_id;
	cmd->param_id = param_id;
	cmd->param_value = param_val;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_val);

int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
					u8 peer_addr[ETH_ALEN],
					struct peer_flush_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->peer_tid_bitmap = param->peer_tid_bitmap;
	cmd->vdev_id = param->vdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
			    "failed to send WMI_PEER_FLUSH_TIDS cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   param->vdev_id, peer_addr, param->peer_tid_bitmap);
int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
					   int vdev_id, const u8 *addr,
					   dma_addr_t paddr, u8 tid,
					   u8 ba_window_size_valid,
	struct wmi_peer_reorder_queue_setup_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ether_addr_copy(cmd->peer_macaddr.addr, addr);
	cmd->vdev_id = vdev_id;

	cmd->queue_ptr_lo = lower_32_bits(paddr);
	cmd->queue_ptr_hi = upper_32_bits(paddr);
	cmd->queue_no = tid;
	cmd->ba_window_size_valid = ba_window_size_valid;
	cmd->ba_window_size = ba_window_size;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
			    "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
		   addr, vdev_id, tid);

ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
				 struct rx_reorder_queue_remove_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_reorder_queue_remove_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
	cmd->vdev_id = param->vdev_id;
	cmd->tid_mask = param->peer_tid_bitmap;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
		   param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
			    "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");

int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
			      u32 param_value, u8 pdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;
	cmd->param_id = param_id;
	cmd->param_value = param_value;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI pdev set param %d pdev id %d value %d\n",
		   param_id, pdev_id, param_value);

int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_set_ps_mode_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->sta_ps_mode = enable;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev set psmode %d vdev id %d\n",

int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->suspend_opt = suspend_opt;
	cmd->pdev_id = pdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI pdev suspend pdev_id %d\n", pdev_id);

int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_resume_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_resume_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI pdev resume pdev id %d\n", pdev_id);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");

/* TODO FW Support for the cmd is not available yet.
 * Can be tested once the command and corresponding
 * event is implemented in FW
 */
int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
					  enum wmi_bss_chan_info_req_type type)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_bss_chan_info_req_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->req_type = type;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI bss chan info req type %d\n", type);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
			    "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");

int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
					struct ap_ps_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
	cmd->param = param->param;
	cmd->value = param->value;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
			    "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
		   param->vdev_id, peer_addr, param->param, param->value);

int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
				u32 param, u32 param_value)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->value = param_value;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI set sta ps vdev_id %d param %d value %d\n",
		   vdev_id, param, param_value);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");

int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);

	cmd->delay_time_ms = delay_time_ms;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
		ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");

int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
				  u32 param_id, u32 param_value)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->param_id = param_id;
	cmd->param_value = param_value;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
			    "failed to send WMI_VDEV_SET_PARAM_CMDID\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);
int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
				      struct stats_request_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->stats_id = param->stats_id;
	cmd->vdev_id = param->vdev_id;
	cmd->pdev_id = param->pdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
		   param->stats_id, param->vdev_id, param->pdev_id);

int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_get_pdev_temperature_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->pdev_id = ar->pdev->pdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);

int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
					    u32 vdev_id, u32 bcn_ctrl_op)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_bcn_offload_ctrl_cmd *cmd;
	struct sk_buff *skb;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));

	cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->bcn_ctrl_op = bcn_ctrl_op;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
		   vdev_id, bcn_ctrl_op);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
			    "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");

int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
			struct ieee80211_mutable_offsets *offs,
			struct sk_buff *bcn)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_bcn_tmpl_cmd *cmd;
	struct wmi_bcn_prb_info *bcn_prb_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	size_t aligned_len = roundup(bcn->len, 4);

	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->tim_ie_offset = offs->tim_offset;
	cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
	cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
	cmd->buf_len = bcn->len;

	ptr = skb->data + sizeof(*cmd);

	len = sizeof(*bcn_prb_info);
	bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					      WMI_TAG_BCN_PRB_INFO) |
				   FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	bcn_prb_info->caps = 0;
	bcn_prb_info->erp = 0;

	ptr += sizeof(*bcn_prb_info);

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	memcpy(tlv->value, bcn->data, bcn->len);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
		ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");

int ath11k_wmi_vdev_install_key(struct ath11k *ar,
				struct wmi_vdev_install_key_arg *arg)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_install_key_cmd *cmd;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));

	len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = arg->vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	cmd->key_idx = arg->key_idx;
	cmd->key_flags = arg->key_flags;
	cmd->key_cipher = arg->key_cipher;
	cmd->key_len = arg->key_len;
	cmd->key_txmic_len = arg->key_txmic_len;
	cmd->key_rxmic_len = arg->key_rxmic_len;

	if (arg->key_rsc_counter)
		memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
		       sizeof(struct wmi_key_seq_counter));

	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
	memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
			    "failed to send WMI_VDEV_INSTALL_KEY cmd\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);

ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
			   struct peer_assoc_params *param,
			   bool hw_crypto_disabled)
	cmd->peer_flags = 0;

	if (param->is_wme_set) {
		if (param->qos_flag)
			cmd->peer_flags |= WMI_PEER_QOS;
		if (param->apsd_flag)
			cmd->peer_flags |= WMI_PEER_APSD;

			cmd->peer_flags |= WMI_PEER_HT;

			cmd->peer_flags |= WMI_PEER_40MHZ;

			cmd->peer_flags |= WMI_PEER_80MHZ;

			cmd->peer_flags |= WMI_PEER_160MHZ;

		/* Typically if STBC is enabled for VHT it should be enabled
		 */
		if (param->stbc_flag)
			cmd->peer_flags |= WMI_PEER_STBC;

		/* Typically if LDPC is enabled for VHT it should be enabled
		 */
		if (param->ldpc_flag)
			cmd->peer_flags |= WMI_PEER_LDPC;

		if (param->static_mimops_flag)
			cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
		if (param->dynamic_mimops_flag)
			cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
		if (param->spatial_mux_flag)
			cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
		if (param->vht_flag)
			cmd->peer_flags |= WMI_PEER_VHT;

			cmd->peer_flags |= WMI_PEER_HE;
		if (param->twt_requester)
			cmd->peer_flags |= WMI_PEER_TWT_REQ;
		if (param->twt_responder)
			cmd->peer_flags |= WMI_PEER_TWT_RESP;

	/* Suppress authorization for all AUTH modes that need 4-way handshake
	 * (during re-association).
	 * Authorization will be done for these modes on key installation.
	 */
	if (param->auth_flag)
		cmd->peer_flags |= WMI_PEER_AUTH;
	if (param->need_ptk_4_way) {
		cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
		if (!hw_crypto_disabled)
			cmd->peer_flags &= ~WMI_PEER_AUTH;
	if (param->need_gtk_2_way)
		cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
	/* safe mode bypasses the 4-way handshake */
	if (param->safe_mode_enabled)
		cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
				     WMI_PEER_NEED_GTK_2_WAY);

	if (param->is_pmf_enabled)
		cmd->peer_flags |= WMI_PEER_PMF;

	/* Disable AMSDU for station transmit, if user configures it */
	/* Disable AMSDU for AP transmit to 11n Stations, if user configures
	 * if (param->amsdu_disable) Add after FW support
	 */

	/* Target asserts if node is marked HT and all MCS is set to 0.
	 * Mark the node as non-HT if all the mcs rates are disabled through
	 */
	if (param->peer_ht_rates.num_rates == 0)
		cmd->peer_flags &= ~WMI_PEER_HT;
int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
				   struct peer_assoc_params *param)
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_peer_assoc_complete_cmd *cmd;
	struct wmi_vht_rate_set *mcs;
	struct wmi_he_rate_set *he_mcs;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	u32 peer_legacy_rates_align;
	u32 peer_ht_rates_align;

	peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
	peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,

	len = sizeof(*cmd) +
	      TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
	      sizeof(*mcs) + TLV_HDR_SIZE +
	      (sizeof(*he_mcs) * param->peer_he_mcs_count);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;

	cmd->peer_new_assoc = param->peer_new_assoc;
	cmd->peer_associd = param->peer_associd;

	ath11k_wmi_copy_peer_flags(cmd, param,
				   test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
					    &ar->ab->dev_flags));

	ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);

	cmd->peer_rate_caps = param->peer_rate_caps;
	cmd->peer_caps = param->peer_caps;
	cmd->peer_listen_intval = param->peer_listen_intval;
	cmd->peer_ht_caps = param->peer_ht_caps;
	cmd->peer_max_mpdu = param->peer_max_mpdu;
	cmd->peer_mpdu_density = param->peer_mpdu_density;
	cmd->peer_vht_caps = param->peer_vht_caps;
	cmd->peer_phymode = param->peer_phymode;

	/* Update 11ax capabilities */
	cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
	cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
	cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
	cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
	cmd->peer_he_ops = param->peer_he_ops;
	memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
	       sizeof(param->peer_he_cap_phyinfo));
	memcpy(&cmd->peer_ppet, &param->peer_ppet,
	       sizeof(param->peer_ppet));

	/* Update peer legacy rate information */
	ptr += sizeof(*cmd);

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);

	ptr += TLV_HDR_SIZE;

	cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
	memcpy(ptr, param->peer_legacy_rates.rates,
	       param->peer_legacy_rates.num_rates);

	/* Update peer HT rate information */
	ptr += peer_legacy_rates_align;

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
	ptr += TLV_HDR_SIZE;
	cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
	memcpy(ptr, param->peer_ht_rates.rates,
	       param->peer_ht_rates.num_rates);

	ptr += peer_ht_rates_align;

	mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);

	cmd->peer_nss = param->peer_nss;

	/* Update bandwidth-NSS mapping */
	cmd->peer_bw_rxnss_override = 0;
	cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;

	if (param->vht_capable) {
		mcs->rx_max_rate = param->rx_max_rate;
		mcs->rx_mcs_set = param->rx_mcs_set;
		mcs->tx_max_rate = param->tx_max_rate;
		mcs->tx_mcs_set = param->tx_mcs_set;

	cmd->peer_he_mcs = param->peer_he_mcs_count;
	cmd->min_data_rate = param->min_data_rate;

	ptr += sizeof(*mcs);

	len = param->peer_he_mcs_count * sizeof(*he_mcs);

	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE;

	/* Loop through the HE rate set */
	for (i = 0; i < param->peer_he_mcs_count; i++) {

		he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
						WMI_TAG_HE_RATE_SET) |
				     FIELD_PREP(WMI_TLV_LEN,
						sizeof(*he_mcs) - TLV_HDR_SIZE);

		he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
		he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
		ptr += sizeof(*he_mcs);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
			    "failed to send WMI_PEER_ASSOC_CMDID\n");

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
		   cmd->vdev_id, cmd->peer_associd, param->peer_mac,
		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
		   cmd->peer_listen_intval, cmd->peer_ht_caps,
		   cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
		   cmd->peer_mpdu_density,
		   cmd->peer_vht_caps, cmd->peer_he_cap_info,
		   cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
		   cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
		   cmd->peer_he_cap_phy[2],
		   cmd->peer_bw_rxnss_override);
1934 void ath11k_wmi_start_scan_init(struct ath11k
*ar
,
1935 struct scan_req_params
*arg
)
1937 /* setup commonly used values */
1938 arg
->scan_req_id
= 1;
1939 arg
->scan_priority
= WMI_SCAN_PRIORITY_LOW
;
1940 arg
->dwell_time_active
= 50;
1941 arg
->dwell_time_active_2g
= 0;
1942 arg
->dwell_time_passive
= 150;
1943 arg
->dwell_time_active_6g
= 40;
1944 arg
->dwell_time_passive_6g
= 30;
1945 arg
->min_rest_time
= 50;
1946 arg
->max_rest_time
= 500;
1947 arg
->repeat_probe_time
= 0;
1948 arg
->probe_spacing_time
= 0;
1950 arg
->max_scan_time
= 20000;
1951 arg
->probe_delay
= 5;
1952 arg
->notify_scan_events
= WMI_SCAN_EVENT_STARTED
|
1953 WMI_SCAN_EVENT_COMPLETED
|
1954 WMI_SCAN_EVENT_BSS_CHANNEL
|
1955 WMI_SCAN_EVENT_FOREIGN_CHAN
|
1956 WMI_SCAN_EVENT_DEQUEUED
;
1957 arg
->scan_flags
|= WMI_SCAN_CHAN_STAT_EVENT
;
1960 /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
1961 * ZEROs in probe request
1963 eth_broadcast_addr(arg
->bssid_list
[0].addr
);
static void
ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
				       struct scan_req_params *param)
{
	/* Scan events subscription */
	if (param->scan_ev_started)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
	if (param->scan_ev_completed)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
	if (param->scan_ev_bss_chan)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
	if (param->scan_ev_foreign_chan)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
	if (param->scan_ev_dequeued)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
	if (param->scan_ev_preempted)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
	if (param->scan_ev_start_failed)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
	if (param->scan_ev_restarted)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
	if (param->scan_ev_foreign_chn_exit)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
	if (param->scan_ev_suspended)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
	if (param->scan_ev_resumed)
		cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;

	/* Set scan control flags */
	cmd->scan_ctrl_flags = 0;
	if (param->scan_f_passive)
		cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
	if (param->scan_f_strict_passive_pch)
		cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
	if (param->scan_f_promisc_mode)
		cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
	if (param->scan_f_capture_phy_err)
		cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
	if (param->scan_f_half_rate)
		cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
	if (param->scan_f_quarter_rate)
		cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
	if (param->scan_f_cck_rates)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
	if (param->scan_f_ofdm_rates)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	if (param->scan_f_chan_stat_evnt)
		cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	if (param->scan_f_filter_prb_req)
		cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
	if (param->scan_f_bcast_probe)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
	if (param->scan_f_offchan_mgmt_tx)
		cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
	if (param->scan_f_offchan_data_tx)
		cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
	if (param->scan_f_force_active_dfs_chn)
		cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
	if (param->scan_f_add_tpc_ie_in_probe)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
	if (param->scan_f_add_ds_ie_in_probe)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
	if (param->scan_f_add_spoofed_mac_in_probe)
		cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
	if (param->scan_f_add_rand_seq_in_probe)
		cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
	if (param->scan_f_en_ie_whitelist_in_probe)
		cmd->scan_ctrl_flags |=
			WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;

	/* for adaptive scan mode using 3 bits (21 - 23 bits) */
	WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
				param->adaptive_dwell_time_mode);
}

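/* The start scan command is a fixed wmi_start_scan_cmd TLV followed by a
 * u32 channel list, an array of wmi_ssid entries, an array of BSSIDs, the
 * extra IE bytes (padded to a 4-byte boundary) and, when present, short-SSID
 * and BSSID hint arrays. Each variable-length section carries its own TLV
 * header.
 */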
int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
				   struct scan_req_params *params)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_start_scan_cmd *cmd;
	struct wmi_ssid *ssid = NULL;
	struct wmi_mac_addr *bssid;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u32 *tmp_ptr;
	u8 extraie_len_with_pad = 0;
	struct hint_short_ssid *s_ssid = NULL;
	struct hint_bssid *hint_bssid = NULL;

	len = sizeof(*cmd);

	len += TLV_HDR_SIZE;
	if (params->num_chan)
		len += params->num_chan * sizeof(u32);

	len += TLV_HDR_SIZE;
	if (params->num_ssids)
		len += params->num_ssids * sizeof(*ssid);

	len += TLV_HDR_SIZE;
	if (params->num_bssid)
		len += sizeof(*bssid) * params->num_bssid;

	len += TLV_HDR_SIZE;
	if (params->extraie.len)
		extraie_len_with_pad =
			roundup(params->extraie.len, sizeof(u32));
	len += extraie_len_with_pad;

	if (params->num_hint_bssid)
		len += TLV_HDR_SIZE +
		       params->num_hint_bssid * sizeof(struct hint_bssid);

	if (params->num_hint_s_ssid)
		len += TLV_HDR_SIZE +
		       params->num_hint_s_ssid * sizeof(struct hint_short_ssid);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	ptr = skb->data;

	cmd = ptr;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->scan_id = params->scan_id;
	cmd->scan_req_id = params->scan_req_id;
	cmd->vdev_id = params->vdev_id;
	cmd->scan_priority = params->scan_priority;
	cmd->notify_scan_events = params->notify_scan_events;

	ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);

	cmd->dwell_time_active = params->dwell_time_active;
	cmd->dwell_time_active_2g = params->dwell_time_active_2g;
	cmd->dwell_time_passive = params->dwell_time_passive;
	cmd->dwell_time_active_6g = params->dwell_time_active_6g;
	cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
	cmd->min_rest_time = params->min_rest_time;
	cmd->max_rest_time = params->max_rest_time;
	cmd->repeat_probe_time = params->repeat_probe_time;
	cmd->probe_spacing_time = params->probe_spacing_time;
	cmd->idle_time = params->idle_time;
	cmd->max_scan_time = params->max_scan_time;
	cmd->probe_delay = params->probe_delay;
	cmd->burst_duration = params->burst_duration;
	cmd->num_chan = params->num_chan;
	cmd->num_bssid = params->num_bssid;
	cmd->num_ssids = params->num_ssids;
	cmd->ie_len = params->extraie.len;
	cmd->n_probes = params->n_probes;

	ptr += sizeof(*cmd);

	len = params->num_chan * sizeof(u32);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE;
	tmp_ptr = (u32 *)ptr;

	for (i = 0; i < params->num_chan; ++i)
		tmp_ptr[i] = params->chan_list[i];

	ptr += len;

	len = params->num_ssids * sizeof(*ssid);
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);

	ptr += TLV_HDR_SIZE;

	if (params->num_ssids) {
		ssid = ptr;
		for (i = 0; i < params->num_ssids; ++i) {
			ssid->ssid_len = params->ssid[i].length;
			memcpy(ssid->ssid, params->ssid[i].ssid,
			       params->ssid[i].length);
			ssid++;
		}
	}

	ptr += (params->num_ssids * sizeof(*ssid));
	len = params->num_bssid * sizeof(*bssid);
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);

	ptr += TLV_HDR_SIZE;
	bssid = ptr;

	if (params->num_bssid) {
		for (i = 0; i < params->num_bssid; ++i) {
			ether_addr_copy(bssid->addr,
					params->bssid_list[i].addr);
			bssid++;
		}
	}

	ptr += params->num_bssid * sizeof(*bssid);

	len = extraie_len_with_pad;
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE;

	if (params->extraie.len)
		memcpy(ptr, params->extraie.ptr,
		       params->extraie.len);

	ptr += extraie_len_with_pad;

	if (params->num_hint_s_ssid) {
		len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len);
		ptr += TLV_HDR_SIZE;
		s_ssid = ptr;
		for (i = 0; i < params->num_hint_s_ssid; ++i) {
			s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
			s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
			s_ssid++;
		}
		ptr += len;
	}

	if (params->num_hint_bssid) {
		len = params->num_hint_bssid * sizeof(struct hint_bssid);
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len);
		ptr += TLV_HDR_SIZE;
		hint_bssid = ptr;
		for (i = 0; i < params->num_hint_bssid; ++i) {
			hint_bssid->freq_flags =
				params->hint_bssid[i].freq_flags;
			ether_addr_copy(&hint_bssid->bssid.addr[0],
					&params->hint_bssid[i].bssid.addr[0]);
			hint_bssid++;
		}
	}

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_START_SCAN_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
				  struct scan_cancel_param *param)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = param->vdev_id;
	cmd->requestor = param->requester;
	cmd->scan_id = param->scan_id;
	cmd->pdev_id = param->pdev_id;
	/* stop the scan with the corresponding scan_id */
	if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
		/* Cancelling all scans */
		cmd->req_type = WMI_SCAN_STOP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
		/* Cancelling VAP scans */
		cmd->req_type = WMI_SCN_STOP_VAP_ALL;
	} else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
		/* Cancelling specific scan */
		cmd->req_type = WMI_SCAN_STOP_ONE;
	} else {
		ath11k_warn(ar->ab, "invalid scan cancel param %d",
			    param->req_type);
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_STOP_SCAN_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
				       struct scan_chan_list_params *chan_list)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel *chan_info;
	struct channel_param *tchan_info;
	struct wmi_tlv *tlv;
	void *ptr;
	int i, ret, len;
	u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
	u32 *reg1, *reg2;

	tchan_info = &chan_list->ch_param[0];
	while (chan_list->nallchans) {
		len = sizeof(*cmd) + TLV_HDR_SIZE;
		max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
			sizeof(*chan_info);

		if (chan_list->nallchans > max_chan_limit)
			num_send_chans = max_chan_limit;
		else
			num_send_chans = chan_list->nallchans;

		chan_list->nallchans -= num_send_chans;
		len += sizeof(*chan_info) * num_send_chans;

		skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
		if (!skb)
			return -ENOMEM;

		cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
		cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
				  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
		cmd->pdev_id = chan_list->pdev_id;
		cmd->num_scan_chans = num_send_chans;
		if (num_sends)
			cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
			   "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
			   num_send_chans, len, cmd->pdev_id, num_sends);

		ptr = skb->data + sizeof(*cmd);

		len = sizeof(*chan_info) * num_send_chans;
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
		ptr += TLV_HDR_SIZE;

		for (i = 0; i < num_send_chans; ++i) {
			chan_info = ptr;
			memset(chan_info, 0, sizeof(*chan_info));
			len = sizeof(*chan_info);
			chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
							   WMI_TAG_CHANNEL) |
						FIELD_PREP(WMI_TLV_LEN,
							   len - TLV_HDR_SIZE);

			reg1 = &chan_info->reg_info_1;
			reg2 = &chan_info->reg_info_2;
			chan_info->mhz = tchan_info->mhz;
			chan_info->band_center_freq1 = tchan_info->cfreq1;
			chan_info->band_center_freq2 = tchan_info->cfreq2;

			if (tchan_info->is_chan_passive)
				chan_info->info |= WMI_CHAN_INFO_PASSIVE;
			if (tchan_info->allow_he)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
			else if (tchan_info->allow_vht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
			else if (tchan_info->allow_ht)
				chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
			if (tchan_info->half_rate)
				chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
			if (tchan_info->quarter_rate)
				chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
			if (tchan_info->psc_channel)
				chan_info->info |= WMI_CHAN_INFO_PSC;

			chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
						      tchan_info->phy_mode);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
					    tchan_info->minpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
					    tchan_info->maxpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
					    tchan_info->maxregpower);
			*reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
					    tchan_info->reg_class_id);
			*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
					    tchan_info->antennamax);

			ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
				   i, chan_info->mhz, chan_info->info);

			ptr += sizeof(*chan_info);

			tchan_info++;
		}

		ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
		if (ret) {
			ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
			dev_kfree_skb(skb);
			return ret;
		}

		num_sends++;
	}

	return 0;
}

int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
				       struct wmi_wmm_params_all_arg *param)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_vdev_set_wmm_params_cmd *cmd;
	struct wmi_wmm_params *wmm_param;
	struct wmi_wmm_params_arg *wmi_wmm_arg;
	struct sk_buff *skb;
	int ret, ac;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->wmm_param_type = 0;

	for (ac = 0; ac < WME_NUM_AC; ac++) {
		switch (ac) {
		case WME_AC_BE:
			wmi_wmm_arg = &param->ac_be;
			break;
		case WME_AC_BK:
			wmi_wmm_arg = &param->ac_bk;
			break;
		case WME_AC_VI:
			wmi_wmm_arg = &param->ac_vi;
			break;
		case WME_AC_VO:
			wmi_wmm_arg = &param->ac_vo;
			break;
		}

		wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
		wmm_param->tlv_header =
			FIELD_PREP(WMI_TLV_TAG,
				   WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
			FIELD_PREP(WMI_TLV_LEN,
				   sizeof(*wmm_param) - TLV_HDR_SIZE);

		wmm_param->aifs = wmi_wmm_arg->aifs;
		wmm_param->cwmin = wmi_wmm_arg->cwmin;
		wmm_param->cwmax = wmi_wmm_arg->cwmax;
		wmm_param->txoplimit = wmi_wmm_arg->txop;
		wmm_param->acm = wmi_wmm_arg->acm;
		wmm_param->no_ack = wmi_wmm_arg->no_ack;

		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
			   "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
			   ac, wmm_param->aifs, wmm_param->cwmin,
			   wmm_param->cwmax, wmm_param->txoplimit,
			   wmm_param->acm, wmm_param->no_ack);
	}

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_VDEV_SET_WMM_PARAMS_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
						  u32 pdev_id)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_dfs_phyerr_offload_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;

	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG,
			   WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = pdev_id;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI dfs phy err offload enable pdev id %d\n", pdev_id);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 initiator, u32 reason)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_delba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_delba_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->initiator = initiator;
	cmd->reasoncode = reason;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
		   vdev_id, mac, tid, initiator, reason);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_DELBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			      u32 tid, u32 status)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_setresponse_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->statuscode = status;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
		   vdev_id, mac, tid, status);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 buf_size)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_send_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_send_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);
	cmd->tid = tid;
	cmd->buffersize = buf_size;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
		   vdev_id, mac, tid, buf_size);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_addba_clear_resp_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
		   vdev_id, mac);

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pdev_pktlog_filter_cmd *cmd;
	struct wmi_pdev_pktlog_filter_info *info;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	int ret, len;

	len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
	cmd->enable = enable;

	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, sizeof(*info));

	ptr += TLV_HDR_SIZE;
	info = ptr;

	ether_addr_copy(info->peer_macaddr.addr, addr);
	info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
			   FIELD_PREP(WMI_TLV_LEN,
				      sizeof(*info) - TLV_HDR_SIZE);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_PKTLOG_FILTER_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_FILTER_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
				 struct wmi_init_country_params init_cc_params)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_init_country_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_country_cmd *)skb->data;
	cmd->tlv_header =
		FIELD_PREP(WMI_TLV_TAG,
			   WMI_TAG_SET_INIT_COUNTRY_CMD) |
		FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = ar->pdev->pdev_id;

	switch (init_cc_params.flags) {
	case ALPHA_IS_SET:
		cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
		memcpy((u8 *)&cmd->cc_info.alpha2,
		       init_cc_params.cc_info.alpha2, 3);
		break;
	case CC_IS_SET:
		cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
		cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
		break;
	case REGDMN_IS_SET:
		cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
		cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_SET_INIT_COUNTRY_CMDID);

out:
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
					     struct thermal_mitigation_params *param)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_therm_throt_config_request_cmd *cmd;
	struct wmi_therm_throt_level_config_info *lvl_conf;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	int i, ret, len;

	len = sizeof(*cmd) + TLV_HDR_SIZE +
	      THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = ar->pdev->pdev_id;
	cmd->enable = param->enable;
	cmd->dc = param->dc;
	cmd->dc_per_event = param->dc_per_event;
	cmd->therm_throt_levels = THERMAL_LEVELS;

	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN,
				 (THERMAL_LEVELS *
				  sizeof(struct wmi_therm_throt_level_config_info)));

	lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
								sizeof(*cmd) +
								TLV_HDR_SIZE);

	for (i = 0; i < THERMAL_LEVELS; i++) {
		lvl_conf->tlv_header =
			FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
			FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);

		lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
		lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
		lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
		lvl_conf->prio = param->levelconf[i].priority;
		lvl_conf++;
	}

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev set thermal throt pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
		   ar->pdev->pdev_id, param->enable, param->dc,
		   param->dc_per_event, THERMAL_LEVELS);

	return ret;
}

int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pktlog_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pktlog_enable_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
	cmd->evlist = pktlog_filter;
	cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_PKTLOG_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_pktlog_disable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pktlog_disable_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_PKTLOG_DISABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_DISABLE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;
	cmd->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
	cmd->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
	cmd->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
	cmd->congestion_thresh_teardown =
		ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
	cmd->congestion_thresh_critical =
		ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
	cmd->interference_thresh_teardown =
		ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
	cmd->interference_thresh_setup =
		ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
	cmd->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
	cmd->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
	cmd->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
	cmd->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
	cmd->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
	cmd->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
	cmd->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
	cmd->remove_sta_slot_interval =
		ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
	/* TODO add MBSSID support */
	cmd->mbss_support = 0;

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_twt_disable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->pdev_id = pdev_id;

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_TWT_DISABLE_CMDID);
	if (ret) {
		ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
			     struct ieee80211_he_obss_pd *he_obss_pd)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_spatial_reuse_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->enable = he_obss_pd->enable;
	cmd->obss_min = he_obss_pd->min_offset;
	cmd->obss_max = he_obss_pd->max_offset;

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
	if (ret) {
		ath11k_warn(ab,
			    "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int
ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
					     u8 bss_color, u32 period,
					     bool enable)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_obss_color_collision_cfg_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
				 ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
	cmd->current_bss_color = bss_color;
	cmd->detection_period_ms = period;
	cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
	cmd->free_slot_expiry_time_ms = 0;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
		   cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
		   cmd->detection_period_ms, cmd->scan_period_ms);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
	if (ret) {
		ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
						bool enable)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct wmi_bss_color_change_enable_params_cmd *cmd;
	struct sk_buff *skb;
	int ret, len;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->enable = enable ? 1 : 0;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "wmi_send_bss_color_change_enable id %d enable %d\n",
		   cmd->vdev_id, cmd->enable);

	ret = ath11k_wmi_cmd_send(wmi, skb,
				  WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
				   struct sk_buff *tmpl)
{
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len;
	struct wmi_fils_discovery_tmpl_cmd *cmd;

	aligned_len = roundup(tmpl->len, 4);
	len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %i set FILS discovery template\n", vdev_id);

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->buf_len = tmpl->len;
	ptr = skb->data + sizeof(*cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
			       struct sk_buff *tmpl)
{
	struct wmi_probe_tmpl_cmd *cmd;
	struct wmi_bcn_prb_info *probe_info;
	struct wmi_tlv *tlv;
	struct sk_buff *skb;
	void *ptr;
	int ret, len;
	size_t aligned_len = roundup(tmpl->len, 4);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %i set probe response template\n", vdev_id);

	len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->buf_len = tmpl->len;

	ptr = skb->data + sizeof(*cmd);

	probe_info = ptr;
	len = sizeof(*probe_info);
	probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
					    WMI_TAG_BCN_PRB_INFO) |
				 FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	probe_info->caps = 0;
	probe_info->erp = 0;

	ptr += sizeof(*probe_info);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
		      FIELD_PREP(WMI_TLV_LEN, aligned_len);
	memcpy(tlv->value, tmpl->data, tmpl->len);

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "WMI vdev %i failed to send probe response template command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
			      bool unsol_bcast_probe_resp_enabled)
{
	struct sk_buff *skb;
	int ret, len;
	struct wmi_fils_discovery_cmd *cmd;

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI vdev %i set %s interval to %u TU\n",
		   vdev_id, unsol_bcast_probe_resp_enabled ?
		   "unsolicited broadcast probe response" : "FILS discovery",
		   interval);

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_fils_discovery_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
	cmd->vdev_id = vdev_id;
	cmd->interval = interval;
	cmd->config = unsol_bcast_probe_resp_enabled;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "WMI vdev %i failed to send FILS discovery enable/disable command\n",
			    vdev_id);
		dev_kfree_skb(skb);
	}

	return ret;
}

static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
			      struct wmi_host_pdev_band_to_mac *band_to_mac)
{
	u8 i;
	struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
	struct ath11k_pdev *pdev;

	for (i = 0; i < soc->num_radios; i++) {
		pdev = &soc->pdevs[i];
		hal_reg_cap = &soc->hal_reg_cap[i];
		band_to_mac[i].pdev_id = pdev->pdev_id;

		switch (pdev->cap.supported_bands) {
		case WMI_HOST_WLAN_2G_5G_CAP:
			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		case WMI_HOST_WLAN_2G_CAP:
			band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
			break;
		case WMI_HOST_WLAN_5G_CAP:
			band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
			band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
			break;
		default:
			break;
		}
	}
}

static void
ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
				struct target_resource_config *tg_cfg)
{
	wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
	wmi_cfg->num_peers = tg_cfg->num_peers;
	wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
	wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
	wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
	wmi_cfg->num_tids = tg_cfg->num_tids;
	wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
	wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
	wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
	wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
	wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
	wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
	wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
	wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
	wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
	wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
	wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
	wmi_cfg->roam_offload_max_ap_profiles =
		tg_cfg->roam_offload_max_ap_profiles;
	wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
	wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
	wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
	wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
	wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
	wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
	wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
	wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
		tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
	wmi_cfg->vow_config = tg_cfg->vow_config;
	wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
	wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
	wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
	wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
	wmi_cfg->num_tdls_conn_table_entries =
		tg_cfg->num_tdls_conn_table_entries;
	wmi_cfg->beacon_tx_offload_max_vdev =
		tg_cfg->beacon_tx_offload_max_vdev;
	wmi_cfg->num_multicast_filter_entries =
		tg_cfg->num_multicast_filter_entries;
	wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
	wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
	wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
	wmi_cfg->max_tdls_concurrent_sleep_sta =
		tg_cfg->max_tdls_concurrent_sleep_sta;
	wmi_cfg->max_tdls_concurrent_buffer_sta =
		tg_cfg->max_tdls_concurrent_buffer_sta;
	wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
	wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
	wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
	wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
	wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
	wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
	wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
	wmi_cfg->flag1 = tg_cfg->atf_config;
	wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
	wmi_cfg->sched_params = tg_cfg->sched_params;
	wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
	wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
}

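/* The WMI init command is laid out as: fixed init TLV, resource config TLV,
 * an array of host memory chunk TLVs and, unless the preferred HW mode is
 * WMI_HOST_HW_MODE_MAX, a set-hw-mode TLV with its band-to-mac array.
 */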
static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
				struct wmi_init_cmd_param *param)
{
	struct ath11k_base *ab = wmi->wmi_ab->ab;
	struct sk_buff *skb;
	struct wmi_init_cmd *cmd;
	struct wmi_resource_config *cfg;
	struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
	struct wmi_pdev_band_to_mac *band_to_mac;
	struct wlan_host_mem_chunk *host_mem_chunks;
	struct wmi_tlv *tlv;
	size_t len;
	void *ptr;
	u32 hw_mode_len = 0;
	u16 idx;
	int ret;

	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
		hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
			      (param->num_band_to_mac * sizeof(*band_to_mac));

	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
	      (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ptr = skb->data + sizeof(*cmd);
	cfg = ptr;

	ath11k_wmi_copy_resource_config(cfg, param->res_cfg);

	cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);

	ptr += sizeof(*cfg);
	host_mem_chunks = ptr + TLV_HDR_SIZE;
	len = sizeof(struct wlan_host_mem_chunk);

	for (idx = 0; idx < param->num_mem_chunks; ++idx) {
		host_mem_chunks[idx].tlv_header =
				FIELD_PREP(WMI_TLV_TAG,
					   WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
				FIELD_PREP(WMI_TLV_LEN, len);

		host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
		host_mem_chunks[idx].size = param->mem_chunks[idx].len;
		host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;

		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
			   param->mem_chunks[idx].req_id,
			   (u64)param->mem_chunks[idx].paddr,
			   param->mem_chunks[idx].len);
	}
	cmd->num_host_mem_chunks = param->num_mem_chunks;
	len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;

	/* num_mem_chunks is zero */
	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
		      FIELD_PREP(WMI_TLV_LEN, len);
	ptr += TLV_HDR_SIZE + len;

	if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
		hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
		hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
						 WMI_TAG_PDEV_SET_HW_MODE_CMD) |
				      FIELD_PREP(WMI_TLV_LEN,
						 sizeof(*hw_mode) - TLV_HDR_SIZE);

		hw_mode->hw_mode_index = param->hw_mode_id;
		hw_mode->num_band_to_mac = param->num_band_to_mac;

		ptr += sizeof(*hw_mode);

		len = param->num_band_to_mac * sizeof(*band_to_mac);
		tlv = ptr;
		tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
			      FIELD_PREP(WMI_TLV_LEN, len);

		ptr += TLV_HDR_SIZE;
		len = sizeof(*band_to_mac);

		for (idx = 0; idx < param->num_band_to_mac; idx++) {
			band_to_mac = (void *)ptr;

			band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
							     WMI_TAG_PDEV_BAND_TO_MAC) |
						  FIELD_PREP(WMI_TLV_LEN,
							     len - TLV_HDR_SIZE);
			band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
			band_to_mac->start_freq =
				param->band_to_mac[idx].start_freq;
			band_to_mac->end_freq =
				param->band_to_mac[idx].end_freq;
			ptr += sizeof(*band_to_mac);
		}
	}

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
	if (ret) {
		ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
			    int pdev_id)
{
	struct ath11k_wmi_pdev_lro_config_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
	get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);

	cmd->pdev_id = pdev_id;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send lro cfg req wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
						WMI_SERVICE_READY_TIMEOUT_HZ);
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}

int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
			   enum wmi_host_hw_mode_config_type mode)
{
	struct wmi_pdev_set_hw_mode_cmd_param *cmd;
	struct sk_buff *skb;
	struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
	int len;
	int ret;

	len = sizeof(*cmd);

	skb = ath11k_wmi_alloc_skb(wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = WMI_PDEV_ID_SOC;
	cmd->hw_mode_index = mode;

	ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
	if (ret) {
		ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
		dev_kfree_skb(skb);
	}

	return ret;
}

int ath11k_wmi_cmd_init(struct ath11k_base *ab)
{
	struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
	struct wmi_init_cmd_param init_param;
	struct target_resource_config config;

	memset(&init_param, 0, sizeof(init_param));
	memset(&config, 0, sizeof(config));

	ab->hw_params.hw_ops->wmi_init_config(ab, &config);

	memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));

	init_param.res_cfg = &wmi_sc->wlan_resource_config;
	init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
	init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
	init_param.mem_chunks = wmi_sc->mem_chunks;

	if (ab->hw_params.single_pdev_only)
		init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;

	init_param.num_band_to_mac = ab->num_radios;
	ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);

	return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}

int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
				  struct ath11k_wmi_vdev_spectral_conf_param *param)
{
	struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	memcpy(&cmd->param, param, sizeof(*param));

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send spectral scan config wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI spectral scan config cmd vdev_id 0x%x\n",
		   param->vdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
				    u32 trigger, u32 enable)
{
	struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;

	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = vdev_id;
	cmd->trigger_cmd = trigger;
	cmd->enable_cmd = enable;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send spectral enable wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI spectral enable cmd vdev id 0x%x\n",
		   vdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
				 struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
	struct sk_buff *skb;
	int ret;

	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->pdev_id = param->pdev_id;
	cmd->module_id = param->module_id;
	cmd->base_paddr_lo = param->base_paddr_lo;
	cmd->base_paddr_hi = param->base_paddr_hi;
	cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
	cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
	cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
	cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
	cmd->num_elems = param->num_elems;
	cmd->buf_size = param->buf_size;
	cmd->num_resp_per_event = param->num_resp_per_event;
	cmd->event_timeout_ms = param->event_timeout_ms;

	ret = ath11k_wmi_cmd_send(ar->wmi, skb,
				  WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
	if (ret) {
		ath11k_warn(ar->ab,
			    "failed to send dma ring cfg req wmi cmd\n");
		goto err;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
		   param->pdev_id);

	return 0;
err:
	dev_kfree_skb(skb);
	return ret;
}

static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
		return -EPROTO;

	if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
		return -ENOBUFS;

	parse->num_buf_entry++;
	return 0;
}

static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;

	if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
		return -EPROTO;

	if (parse->num_meta >= parse->fixed.num_meta_data_entry)
		return -ENOBUFS;

	parse->num_meta++;
	return 0;
}

static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
					u16 tag, u16 len,
					const void *ptr, void *data)
{
	struct wmi_tlv_dma_buf_release_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_DMA_BUF_RELEASE:
		memcpy(&parse->fixed, ptr,
		       sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
		parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
		break;
	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->buf_entry_done) {
			parse->num_buf_entry = 0;
			parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;

			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_dma_buf_entry_parse,
						  parse);
			if (ret) {
				ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
					    ret);
				return ret;
			}

			parse->buf_entry_done = true;
		} else if (!parse->meta_data_done) {
			parse->num_meta = 0;
			parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;

			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_dma_buf_meta_parse,
						  parse);
			if (ret) {
				ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
					    ret);
				return ret;
			}

			parse->meta_data_done = true;
		}
		break;
	default:
		break;
	}
	return 0;
}

static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
							struct sk_buff *skb)
{
	struct wmi_tlv_dma_buf_release_parse parse = { };
	struct ath11k_dbring_buf_release_event param;
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_dma_buf_parse,
				  &parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
		return;
	}

	param.fixed = parse.fixed;
	param.buf_entry = parse.buf_entry;
	param.num_buf_entry = parse.num_buf_entry;
	param.meta_data = parse.meta_data;
	param.num_meta = parse.num_meta;

	ret = ath11k_dbring_buffer_release_event(ab, &param);
	if (ret) {
		ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
		return;
	}
}

static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct wmi_hw_mode_capabilities *hw_mode_cap;
	u32 phy_map = 0;

	if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
		return -ENOBUFS;

	hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
				   hw_mode_id);
	svc_rdy_ext->n_hw_mode_caps++;

	phy_map = hw_mode_cap->phy_id_map;
	while (phy_map) {
		svc_rdy_ext->tot_phy_id++;
		phy_map = phy_map >> 1;
	}

	return 0;
}

static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
				       u16 len, const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct wmi_hw_mode_capabilities *hw_mode_caps;
	enum wmi_host_hw_mode_config_type mode, pref;
	u32 i;
	int ret;

	svc_rdy_ext->n_hw_mode_caps = 0;
	svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;

	ret = ath11k_wmi_tlv_iter(soc, ptr, len,
				  ath11k_wmi_tlv_hw_mode_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath11k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	i = 0;
	while (i < svc_rdy_ext->n_hw_mode_caps) {
		hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
		mode = hw_mode_caps->hw_mode_id;
		pref = soc->wmi_ab.preferred_hw_mode;

		if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
			svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
			soc->wmi_ab.preferred_hw_mode = mode;
		}
		i++;
	}

	ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
		   soc->wmi_ab.preferred_hw_mode);
	if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
		return -EINVAL;

	return 0;
}

static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
		return -EPROTO;

	if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
		return -ENOBUFS;

	len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
	if (!svc_rdy_ext->n_mac_phy_caps) {
		svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
						    GFP_ATOMIC);
		if (!svc_rdy_ext->mac_phy_caps)
			return -ENOMEM;
	}

	memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
	svc_rdy_ext->n_mac_phy_caps++;
	return 0;
}

static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
						 u16 tag, u16 len,
						 const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;

	if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
		return -EPROTO;

	if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
		return -ENOBUFS;

	svc_rdy_ext->n_ext_hal_reg_caps++;
	return 0;
}

static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
					   u16 len, const void *ptr, void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	struct ath11k_hal_reg_capabilities_ext reg_cap;
	int ret;
	u32 i;

	svc_rdy_ext->n_ext_hal_reg_caps = 0;
	svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
	ret = ath11k_wmi_tlv_iter(soc, ptr, len,
				  ath11k_wmi_tlv_ext_hal_reg_caps_parse,
				  svc_rdy_ext);
	if (ret) {
		ath11k_warn(soc, "failed to parse tlv %d\n", ret);
		return ret;
	}

	for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
		ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
						      svc_rdy_ext->soc_hal_reg_caps,
						      svc_rdy_ext->ext_hal_reg_caps, i,
						      &reg_cap);
		if (ret) {
			ath11k_warn(soc, "failed to extract reg cap %d\n", i);
			return ret;
		}

		memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
		       &reg_cap, sizeof(reg_cap));
	}
	return 0;
}

static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
						     u16 len, const void *ptr,
						     void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
	u32 phy_id_map;
	int pdev_index = 0;
	int ret;

	svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
	svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;

	soc->num_radios = 0;
	phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;

	while (phy_id_map && soc->num_radios < MAX_RADIOS) {
		ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
							    svc_rdy_ext->hw_caps,
							    svc_rdy_ext->hw_mode_caps,
							    svc_rdy_ext->soc_hal_reg_caps,
							    svc_rdy_ext->mac_phy_caps,
							    hw_mode_id, soc->num_radios,
							    &soc->pdevs[pdev_index]);
		if (ret) {
			ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
				    soc->num_radios);
			return ret;
		}

		soc->num_radios++;

		/* For QCA6390, save mac_phy capability in the same pdev */
		if (soc->hw_params.single_pdev_only)
			pdev_index = 0;
		else
			pdev_index = soc->num_radios;

		/* TODO: mac_phy_cap prints */
		phy_id_map >>= 1;
	}

	/* For QCA6390, set num_radios to 1 because host manages
	 * both 2G and 5G radio in one pdev.
	 * Set pdev_id = 0 and 0 means soc level.
	 */
	if (soc->hw_params.single_pdev_only) {
		soc->num_radios = 1;
		soc->pdevs[0].pdev_id = 0;
	}

	return 0;
}

static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
					      u16 tag, u16 len,
					      const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *parse = data;

	if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
		return -EPROTO;

	parse->n_dma_ring_caps++;
	return 0;
}

static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
					u32 num_cap)
{
	size_t sz;
	void *ptr;

	sz = num_cap * sizeof(struct ath11k_dbring_cap);
	ptr = kzalloc(sz, GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	ab->db_caps = ptr;
	ab->num_db_cap = num_cap;

	return 0;
}

static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
{
	kfree(ab->db_caps);
	ab->db_caps = NULL;
}

static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
					u16 len, const void *ptr, void *data)
{
	struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
	struct wmi_dma_ring_capabilities *dma_caps;
	struct ath11k_dbring_cap *dir_buff_caps;
	int ret;
	u32 i;

	dma_caps_parse->n_dma_ring_caps = 0;
	dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
	ret = ath11k_wmi_tlv_iter(ab, ptr, len,
				  ath11k_wmi_tlv_dma_ring_caps_parse,
				  dma_caps_parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
		return ret;
	}

	if (!dma_caps_parse->n_dma_ring_caps)
		return 0;

	if (ab->num_db_cap) {
		ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
		return 0;
	}

	ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
	if (ret)
		return ret;

	dir_buff_caps = ab->db_caps;
	for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
		if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
			ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
			ret = -EINVAL;
			goto free_dir_buff;
		}

		dir_buff_caps[i].id = dma_caps[i].module_id;
		dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
		dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
		dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
		dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
	}

	return 0;

free_dir_buff:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}

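/* The extended service ready event is parsed with a small state machine:
 * successive WMI_TAG_ARRAY_STRUCT TLVs are interpreted in order as HW mode
 * caps, MAC/PHY caps, extended HAL reg caps, chainmask combo/capability
 * lists, OEM DMA ring caps and finally DMA ring caps.
 */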
static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
					    u16 tag, u16 len,
					    const void *ptr, void *data)
{
	struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
	struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
	int ret;

	switch (tag) {
	case WMI_TAG_SERVICE_READY_EXT_EVENT:
		ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
						&svc_rdy_ext->param);
		if (ret) {
			ath11k_warn(ab, "unable to extract ext params\n");
			return ret;
		}
		break;

	case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
		svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
		svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
		break;

	case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
		ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
								svc_rdy_ext);
		if (ret)
			return ret;
		break;

	case WMI_TAG_ARRAY_STRUCT:
		if (!svc_rdy_ext->hw_mode_done) {
			ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
							  svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->hw_mode_done = true;
		} else if (!svc_rdy_ext->mac_phy_done) {
			svc_rdy_ext->n_mac_phy_caps = 0;
			ret = ath11k_wmi_tlv_iter(ab, ptr, len,
						  ath11k_wmi_tlv_mac_phy_caps_parse,
						  svc_rdy_ext);
			if (ret) {
				ath11k_warn(ab, "failed to parse tlv %d\n", ret);
				return ret;
			}

			svc_rdy_ext->mac_phy_done = true;
		} else if (!svc_rdy_ext->ext_hal_reg_done) {
			ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
							      svc_rdy_ext);
			if (ret)
				return ret;

			svc_rdy_ext->ext_hal_reg_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
			svc_rdy_ext->mac_phy_chainmask_combo_done = true;
		} else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
			svc_rdy_ext->mac_phy_chainmask_cap_done = true;
		} else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
			svc_rdy_ext->oem_dma_ring_cap_done = true;
		} else if (!svc_rdy_ext->dma_ring_cap_done) {
			ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
							   &svc_rdy_ext->dma_caps_parse);
			if (ret)
				return ret;

			svc_rdy_ext->dma_ring_cap_done = true;
		}
		break;

	default:
		break;
	}

	return 0;
}
static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_svc_rdy_ext_parse,
				  &svc_rdy_ext);
	if (ret) {
		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
		goto err;
	}

	if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
		complete(&ab->wmi_ab.service_ready);

	kfree(svc_rdy_ext.mac_phy_caps);
	return 0;

err:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}
static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
					     u16 tag, u16 len,
					     const void *ptr, void *data)
{
	struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
	int ret;

	switch (tag) {
	case WMI_TAG_ARRAY_STRUCT:
		if (!parse->dma_ring_cap_done) {
			ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
							   &parse->dma_caps_parse);
			if (ret)
				return ret;

			parse->dma_ring_cap_done = true;
		}
		break;
	default:
		break;
	}

	return 0;
}

static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
					   struct sk_buff *skb)
{
	struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_svc_rdy_ext2_parse,
				  &svc_rdy_ext2);
	if (ret) {
		ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
		goto err;
	}

	complete(&ab->wmi_ab.service_ready);

	return 0;

err:
	ath11k_wmi_free_dbring_caps(ab);
	return ret;
}
static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
					   struct wmi_vdev_start_resp_event *vdev_rsp)
{
	const void **tb;
	const struct wmi_vdev_start_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev start resp ev");
		kfree(tb);
		return -EPROTO;
	}

	memset(vdev_rsp, 0, sizeof(*vdev_rsp));

	vdev_rsp->vdev_id = ev->vdev_id;
	vdev_rsp->requestor_id = ev->requestor_id;
	vdev_rsp->resp_type = ev->resp_type;
	vdev_rsp->status = ev->status;
	vdev_rsp->chain_mask = ev->chain_mask;
	vdev_rsp->smps_mode = ev->smps_mode;
	vdev_rsp->mac_id = ev->mac_id;
	vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
	vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;

	kfree(tb);
	return 0;
}
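
/* Each WMI regulatory rule packs several values into three 32-bit words
 * (freq_info, bw_pwr_info and flag_info); FIELD_GET() with the REG_RULE_*
 * masks unpacks them into the driver's struct cur_reg_rule below.
 */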
static struct cur_reg_rule
*create_reg_rules_from_wmi(u32 num_reg_rules,
			   struct wmi_regulatory_rule_struct *wmi_reg_rule)
{
	struct cur_reg_rule *reg_rule_ptr;
	u32 count;

	reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
			       GFP_ATOMIC);

	if (!reg_rule_ptr)
		return NULL;

	for (count = 0; count < num_reg_rules; count++) {
		reg_rule_ptr[count].start_freq =
			FIELD_GET(REG_RULE_START_FREQ,
				  wmi_reg_rule[count].freq_info);
		reg_rule_ptr[count].end_freq =
			FIELD_GET(REG_RULE_END_FREQ,
				  wmi_reg_rule[count].freq_info);
		reg_rule_ptr[count].max_bw =
			FIELD_GET(REG_RULE_MAX_BW,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].reg_power =
			FIELD_GET(REG_RULE_REG_PWR,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].ant_gain =
			FIELD_GET(REG_RULE_ANT_GAIN,
				  wmi_reg_rule[count].bw_pwr_info);
		reg_rule_ptr[count].flags =
			FIELD_GET(REG_RULE_FLAGS,
				  wmi_reg_rule[count].flag_info);
	}

	return reg_rule_ptr;
}
static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
					       struct sk_buff *skb,
					       struct cur_regulatory_info *reg_info)
{
	const void **tb;
	const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
	struct wmi_regulatory_rule_struct *wmi_reg_rule;
	u32 num_2g_reg_rules, num_5g_reg_rules;
	int ret;

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
	if (!chan_list_event_hdr) {
		ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
		kfree(tb);
		return -EPROTO;
	}

	reg_info->num_2g_reg_rules = chan_list_event_hdr->num_2g_reg_rules;
	reg_info->num_5g_reg_rules = chan_list_event_hdr->num_5g_reg_rules;

	if (!(reg_info->num_2g_reg_rules + reg_info->num_5g_reg_rules)) {
		ath11k_warn(ab, "No regulatory rules available in the event info\n");
		kfree(tb);
		return -EINVAL;
	}

	memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
	       REG_ALPHA2_LEN);
	reg_info->dfs_region = chan_list_event_hdr->dfs_region;
	reg_info->phybitmap = chan_list_event_hdr->phybitmap;
	reg_info->num_phy = chan_list_event_hdr->num_phy;
	reg_info->phy_id = chan_list_event_hdr->phy_id;
	reg_info->ctry_code = chan_list_event_hdr->country_id;
	reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
	if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_PASS)
		reg_info->status_code = REG_SET_CC_STATUS_PASS;
	else if (chan_list_event_hdr->status_code == WMI_REG_CURRENT_ALPHA2_NOT_FOUND)
		reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
	else if (chan_list_event_hdr->status_code == WMI_REG_INIT_ALPHA2_NOT_FOUND)
		reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_CHANGE_NOT_ALLOWED)
		reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_NO_MEMORY)
		reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
	else if (chan_list_event_hdr->status_code == WMI_REG_SET_CC_STATUS_FAIL)
		reg_info->status_code = REG_SET_CC_STATUS_FAIL;

	reg_info->min_bw_2g = chan_list_event_hdr->min_bw_2g;
	reg_info->max_bw_2g = chan_list_event_hdr->max_bw_2g;
	reg_info->min_bw_5g = chan_list_event_hdr->min_bw_5g;
	reg_info->max_bw_5g = chan_list_event_hdr->max_bw_5g;

	num_2g_reg_rules = reg_info->num_2g_reg_rules;
	num_5g_reg_rules = reg_info->num_5g_reg_rules;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "%s:cc %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
		   __func__, reg_info->alpha2, reg_info->dfs_region,
		   reg_info->min_bw_2g, reg_info->max_bw_2g,
		   reg_info->min_bw_5g, reg_info->max_bw_5g);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "%s: num_2g_reg_rules %d num_5g_reg_rules %d", __func__,
		   num_2g_reg_rules, num_5g_reg_rules);

	wmi_reg_rule =
		(struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
						      + sizeof(*chan_list_event_hdr)
						      + sizeof(struct wmi_tlv));

	if (num_2g_reg_rules) {
		reg_info->reg_rules_2g_ptr = create_reg_rules_from_wmi(num_2g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_2g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 2g rules\n");
			return -ENOMEM;
		}
	}

	if (num_5g_reg_rules) {
		wmi_reg_rule += num_2g_reg_rules;
		reg_info->reg_rules_5g_ptr = create_reg_rules_from_wmi(num_5g_reg_rules,
								       wmi_reg_rule);
		if (!reg_info->reg_rules_5g_ptr) {
			kfree(tb);
			ath11k_warn(ab, "Unable to Allocate memory for 5g rules\n");
			return -ENOMEM;
		}
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");

	kfree(tb);
	return 0;
}
static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
					struct wmi_peer_delete_resp_event *peer_del_resp)
{
	const void **tb;
	const struct wmi_peer_delete_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	memset(peer_del_resp, 0, sizeof(*peer_del_resp));

	peer_del_resp->vdev_id = ev->vdev_id;
	ether_addr_copy(peer_del_resp->peer_macaddr.addr,
			ev->peer_macaddr.addr);

	kfree(tb);
	return 0;
}

static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
					struct sk_buff *skb,
					u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_delete_resp_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev delete resp ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}
static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
					u32 len, u32 *vdev_id,
					u32 *tx_status)
{
	const void **tb;
	const struct wmi_bcn_tx_status_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch bcn tx status ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;
	*tx_status = ev->tx_status;

	kfree(tb);
	return 0;
}

static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
					      u32 *vdev_id)
{
	const void **tb;
	const struct wmi_vdev_stopped_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev stop ev");
		kfree(tb);
		return -EPROTO;
	}

	*vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}
static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
					  struct sk_buff *skb,
					  struct mgmt_rx_event_params *hdr)
{
	const void **tb;
	const struct wmi_mgmt_rx_hdr *ev;
	const u8 *frame;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_MGMT_RX_HDR];
	frame = tb[WMI_TAG_ARRAY_BYTE];

	if (!ev || !frame) {
		ath11k_warn(ab, "failed to fetch mgmt rx hdr");
		kfree(tb);
		return -EPROTO;
	}

	hdr->pdev_id = ev->pdev_id;
	hdr->chan_freq = ev->chan_freq;
	hdr->channel = ev->channel;
	hdr->snr = ev->snr;
	hdr->rate = ev->rate;
	hdr->phy_mode = ev->phy_mode;
	hdr->buf_len = ev->buf_len;
	hdr->status = ev->status;
	hdr->flags = ev->flags;
	hdr->rssi = ev->rssi;
	hdr->tsf_delta = ev->tsf_delta;
	memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));

	if (skb->len < (frame - skb->data) + hdr->buf_len) {
		ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
		kfree(tb);
		return -EPROTO;
	}

	/* shift the sk_buff to point to `frame` */
	skb_trim(skb, 0);
	skb_put(skb, frame - skb->data);
	skb_pull(skb, frame - skb->data);
	skb_put(skb, hdr->buf_len);

	ath11k_ce_byte_swap(skb->data, hdr->buf_len);

	kfree(tb);
	return 0;
}
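
/* Management tx completion: the desc_id carried in the event is the IDR key
 * that was assigned when the frame was queued for transmission. The handler
 * below looks the skb up in ar->txmgmt_idr under txmgmt_idr_lock, removes
 * it, unmaps the DMA buffer and reports the tx status to mac80211.
 */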
static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
				    u32 status)
{
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;

	spin_lock_bh(&ar->txmgmt_idr_lock);
	msdu = idr_find(&ar->txmgmt_idr, desc_id);

	if (!msdu) {
		ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
			    desc_id);
		spin_unlock_bh(&ar->txmgmt_idr_lock);
		return -ENOENT;
	}

	idr_remove(&ar->txmgmt_idr, desc_id);
	spin_unlock_bh(&ar->txmgmt_idr_lock);

	skb_cb = ATH11K_SKB_CB(msdu);
	dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	info = IEEE80211_SKB_CB(msdu);
	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	/* WARN when we received this event without doing any mgmt tx */
	if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
		WARN_ON_ONCE(1);

	return 0;
}
static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
					       struct sk_buff *skb,
					       struct wmi_mgmt_tx_compl_event *param)
{
	const void **tb;
	const struct wmi_mgmt_tx_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
		kfree(tb);
		return -EPROTO;
	}

	param->pdev_id = ev->pdev_id;
	param->desc_id = ev->desc_id;
	param->status = ev->status;

	kfree(tb);
	return 0;
}
static void ath11k_wmi_event_scan_started(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_STARTING:
		ar->scan.state = ATH11K_SCAN_RUNNING;
		complete(&ar->scan.started);
		break;
	}
}

static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_STARTING:
		complete(&ar->scan.started);
		__ath11k_mac_scan_finish(ar);
		break;
	}
}
static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		/* One suspected reason scan can be completed while starting is
		 * if firmware fails to deliver all scan events to the host,
		 * e.g. when transport pipe is full. This has been observed
		 * with spectral scan phyerr events starving wmi transport
		 * pipe. In such case the "scan completed" event should be (and
		 * is) ignored by the host as it may be just firmware's scan
		 * state machine recovering.
		 */
		ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		__ath11k_mac_scan_finish(ar);
		break;
	}
}

static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ar->scan_channel = NULL;
		break;
	}
}

static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
{
	lockdep_assert_held(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
			    ath11k_scan_state_str(ar->scan.state),
			    ar->scan.state);
		break;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
		break;
	}
}
static const char *
ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
			       enum wmi_scan_completion_reason reason)
{
	switch (type) {
	case WMI_SCAN_EVENT_STARTED:
		return "started";
	case WMI_SCAN_EVENT_COMPLETED:
		switch (reason) {
		case WMI_SCAN_REASON_COMPLETED:
			return "completed";
		case WMI_SCAN_REASON_CANCELLED:
			return "completed [cancelled]";
		case WMI_SCAN_REASON_PREEMPTED:
			return "completed [preempted]";
		case WMI_SCAN_REASON_TIMEDOUT:
			return "completed [timedout]";
		case WMI_SCAN_REASON_INTERNAL_FAILURE:
			return "completed [internal err]";
		case WMI_SCAN_REASON_MAX:
			break;
		}
		return "completed [unknown]";
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		return "bss channel";
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		return "foreign channel";
	case WMI_SCAN_EVENT_DEQUEUED:
		return "dequeued";
	case WMI_SCAN_EVENT_PREEMPTED:
		return "preempted";
	case WMI_SCAN_EVENT_START_FAILED:
		return "start failed";
	case WMI_SCAN_EVENT_RESTARTED:
		return "restarted";
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
		return "foreign channel exit";
	default:
		return "unknown";
	}
}
static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_scan_event *scan_evt_param)
{
	const void **tb;
	const struct wmi_scan_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_SCAN_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch scan ev");
		kfree(tb);
		return -EPROTO;
	}

	scan_evt_param->event_type = ev->event_type;
	scan_evt_param->reason = ev->reason;
	scan_evt_param->channel_freq = ev->channel_freq;
	scan_evt_param->scan_req_id = ev->scan_req_id;
	scan_evt_param->scan_id = ev->scan_id;
	scan_evt_param->vdev_id = ev->vdev_id;
	scan_evt_param->tsf_timestamp = ev->tsf_timestamp;

	kfree(tb);
	return 0;
}
static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
					   struct wmi_peer_sta_kickout_arg *arg)
{
	const void **tb;
	const struct wmi_peer_sta_kickout_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer sta kickout ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->mac_addr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}

static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
			       struct wmi_roam_event *roam_ev)
{
	const void **tb;
	const struct wmi_roam_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_ROAM_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch roam ev");
		kfree(tb);
		return -EPROTO;
	}

	roam_ev->vdev_id = ev->vdev_id;
	roam_ev->reason = ev->reason;
	roam_ev->rssi = ev->rssi;

	kfree(tb);
	return 0;
}
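
/* freq_to_idx() maps a channel center frequency to a flat index across all
 * bands advertised to mac80211 (iterating from NL80211_BAND_2GHZ upwards),
 * which is also the layout of the ar->survey[] array used by the channel
 * info handlers further below.
 */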
static int freq_to_idx(struct ath11k *ar, int freq)
{
	struct ieee80211_supported_band *sband;
	int band, ch, idx = 0;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		sband = ar->hw->wiphy->bands[band];
		if (!sband)
			continue;

		for (ch = 0; ch < sband->n_channels; ch++, idx++)
			if (sband->channels[ch].center_freq == freq)
				goto exit;
	}

exit:
	return idx;
}
static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
				    u32 len, struct wmi_chan_info_event *ch_info_ev)
{
	const void **tb;
	const struct wmi_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_CHAN_INFO_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	ch_info_ev->err_code = ev->err_code;
	ch_info_ev->freq = ev->freq;
	ch_info_ev->cmd_flags = ev->cmd_flags;
	ch_info_ev->noise_floor = ev->noise_floor;
	ch_info_ev->rx_clear_count = ev->rx_clear_count;
	ch_info_ev->cycle_count = ev->cycle_count;
	ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
	ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
	ch_info_ev->rx_frame_count = ev->rx_frame_count;
	ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
	ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
	ch_info_ev->vdev_id = ev->vdev_id;

	kfree(tb);
	return 0;
}
static int
ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
				  struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
{
	const void **tb;
	const struct wmi_pdev_bss_chan_info_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
		kfree(tb);
		return -EPROTO;
	}

	bss_ch_info_ev->pdev_id = ev->pdev_id;
	bss_ch_info_ev->freq = ev->freq;
	bss_ch_info_ev->noise_floor = ev->noise_floor;
	bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
	bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
	bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
	bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
	bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
	bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
	bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
	bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
	bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
	bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;

	kfree(tb);
	return 0;
}
static int
ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
				      struct wmi_vdev_install_key_complete_arg *arg)
{
	const void **tb;
	const struct wmi_vdev_install_key_compl_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch vdev install key compl ev");
		kfree(tb);
		return -EPROTO;
	}

	arg->vdev_id = ev->vdev_id;
	arg->macaddr = ev->peer_macaddr.addr;
	arg->key_idx = ev->key_idx;
	arg->key_flags = ev->key_flags;
	arg->status = ev->status;

	kfree(tb);
	return 0;
}

static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
					  struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
{
	const void **tb;
	const struct wmi_peer_assoc_conf_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch peer assoc conf ev");
		kfree(tb);
		return -EPROTO;
	}

	peer_assoc_conf->vdev_id = ev->vdev_id;
	peer_assoc_conf->macaddr = ev->peer_macaddr.addr;

	kfree(tb);
	return 0;
}
static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
					    struct ath11k_fw_stats_pdev *dst)
{
	dst->ch_noise_floor = src->chan_nf;
	dst->tx_frame_count = src->tx_frame_count;
	dst->rx_frame_count = src->rx_frame_count;
	dst->rx_clear_count = src->rx_clear_count;
	dst->cycle_count = src->cycle_count;
	dst->phy_err_count = src->phy_err_count;
	dst->chan_tx_power = src->chan_tx_pwr;
}

static void
ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
			      struct ath11k_fw_stats_pdev *dst)
{
	dst->comp_queued = src->comp_queued;
	dst->comp_delivered = src->comp_delivered;
	dst->msdu_enqued = src->msdu_enqued;
	dst->mpdu_enqued = src->mpdu_enqued;
	dst->wmm_drop = src->wmm_drop;
	dst->local_enqued = src->local_enqued;
	dst->local_freed = src->local_freed;
	dst->hw_queued = src->hw_queued;
	dst->hw_reaped = src->hw_reaped;
	dst->underrun = src->underrun;
	dst->tx_abort = src->tx_abort;
	dst->mpdus_requed = src->mpdus_requed;
	dst->tx_ko = src->tx_ko;
	dst->data_rc = src->data_rc;
	dst->self_triggers = src->self_triggers;
	dst->sw_retry_failure = src->sw_retry_failure;
	dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
	dst->pdev_cont_xretry = src->pdev_cont_xretry;
	dst->pdev_tx_timeout = src->pdev_tx_timeout;
	dst->pdev_resets = src->pdev_resets;
	dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
	dst->phy_underrun = src->phy_underrun;
	dst->txop_ovf = src->txop_ovf;
}
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
					  struct ath11k_fw_stats_pdev *dst)
{
	dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
	dst->status_rcvd = src->status_rcvd;
	dst->r0_frags = src->r0_frags;
	dst->r1_frags = src->r1_frags;
	dst->r2_frags = src->r2_frags;
	dst->r3_frags = src->r3_frags;
	dst->htt_msdus = src->htt_msdus;
	dst->htt_mpdus = src->htt_mpdus;
	dst->loc_msdus = src->loc_msdus;
	dst->loc_mpdus = src->loc_mpdus;
	dst->oversize_amsdu = src->oversize_amsdu;
	dst->phy_errs = src->phy_errs;
	dst->phy_err_drop = src->phy_err_drop;
	dst->mpdu_errs = src->mpdu_errs;
}
static void
ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
			   struct ath11k_fw_stats_vdev *dst)
{
	int i;

	dst->vdev_id = src->vdev_id;
	dst->beacon_snr = src->beacon_snr;
	dst->data_snr = src->data_snr;
	dst->num_rx_frames = src->num_rx_frames;
	dst->num_rts_fail = src->num_rts_fail;
	dst->num_rts_success = src->num_rts_success;
	dst->num_rx_err = src->num_rx_err;
	dst->num_rx_discard = src->num_rx_discard;
	dst->num_tx_not_acked = src->num_tx_not_acked;

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
		dst->num_tx_frames[i] = src->num_tx_frames[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
		dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];

	for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
		dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];

	for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
		dst->tx_rate_history[i] = src->tx_rate_history[i];

	for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
		dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
}

static void
ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
			  struct ath11k_fw_stats_bcn *dst)
{
	dst->vdev_id = src->vdev_id;
	dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
	dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
			     struct ath11k_fw_stats *stats)
{
	const void **tb;
	const struct wmi_stats_event *ev;
	const void *data;
	int i, ret;
	u32 len = skb->len;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_STATS_EVENT];
	data = tb[WMI_TAG_ARRAY_BYTE];
	if (!ev || !data) {
		ath11k_warn(ab, "failed to fetch update stats ev");
		kfree(tb);
		return -EPROTO;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
		   ev->pdev_id,
		   ev->num_pdev_stats, ev->num_vdev_stats,
		   ev->num_bcn_stats);

	stats->pdev_id = ev->pdev_id;
	stats->stats_id = 0;

	for (i = 0; i < ev->num_pdev_stats; i++) {
		const struct wmi_pdev_stats *src;
		struct ath11k_fw_stats_pdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_PDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
		ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
		ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
		list_add_tail(&dst->list, &stats->pdevs);
	}

	for (i = 0; i < ev->num_vdev_stats; i++) {
		const struct wmi_vdev_stats *src;
		struct ath11k_fw_stats_vdev *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_VDEV_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_vdev_stats(src, dst);
		list_add_tail(&dst->list, &stats->vdevs);
	}

	for (i = 0; i < ev->num_bcn_stats; i++) {
		const struct wmi_bcn_stats *src;
		struct ath11k_fw_stats_bcn *dst;

		src = data;
		if (len < sizeof(*src)) {
			kfree(tb);
			return -EPROTO;
		}

		stats->stats_id = WMI_REQUEST_BCN_STAT;

		data += sizeof(*src);
		len -= sizeof(*src);

		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
		if (!dst)
			continue;

		ath11k_wmi_pull_bcn_stats(src, dst);
		list_add_tail(&dst->list, &stats->bcn);
	}

	kfree(tb);
	return 0;
}
static int
ath11k_pull_pdev_temp_ev(struct ath11k_base *ab, u8 *evt_buf,
			 u32 len, const struct wmi_pdev_temperature_event *ev)
{
	const void **tb;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return ret;
	}

	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev temp ev");
		kfree(tb);
		return -EPROTO;
	}

	kfree(tb);
	return 0;
}

size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
{
	struct ath11k_fw_stats_vdev *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}

static size_t ath11k_wmi_fw_stats_num_bcn(struct list_head *head)
{
	struct ath11k_fw_stats_bcn *i;
	size_t num = 0;

	list_for_each_entry(i, head, list)
		++num;

	return num;
}
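
/* The *_stats_fill() helpers below append human readable lines to a caller
 * provided buffer of ATH11K_FW_STATS_BUF_SIZE bytes. Each helper takes the
 * running length by pointer, clamps every write with
 * scnprintf(buf + len, buf_len - len, ...) and writes the new length back,
 * so they can be chained from ath11k_wmi_fw_stats_fill().
 */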
static void
ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				   char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n");
	len += scnprintf(buf + len, buf_len - len, "%30s\n",
			 "ath11k PDEV stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "=================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Channel noise floor", pdev->ch_noise_floor);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Channel TX power", pdev->chan_tx_power);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX frame count", pdev->tx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX frame count", pdev->rx_frame_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "RX clear count", pdev->rx_clear_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Cycle count", pdev->cycle_count);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY error count", pdev->phy_err_count);

	*length = len;
}
static void
ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV TX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies queued", pdev->comp_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HTT cookies disp.", pdev->comp_delivered);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDU queued", pdev->msdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU queued", pdev->mpdu_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs dropped", pdev->wmm_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local enqued", pdev->local_enqued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Local freed", pdev->local_freed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "HW queued", pdev->hw_queued);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs reaped", pdev->hw_reaped);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Num underruns", pdev->underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PPDUs cleaned", pdev->tx_abort);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs requed", pdev->mpdus_requed);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Excessive retries", pdev->tx_ko);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "HW rate", pdev->data_rc);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Sched self triggers", pdev->self_triggers);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Dropped due to SW retries",
			 pdev->sw_retry_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Illegal rate phy errors",
			 pdev->illgl_rate_phy_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "TX timeout", pdev->pdev_tx_timeout);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PDEV resets", pdev->pdev_resets);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "Stateless TIDs alloc failures",
			 pdev->stateless_tid_alloc_failure);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "PHY underrun", pdev->phy_underrun);
	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
			 "MPDU is more than txop limit", pdev->txop_ovf);

	*length = len;
}
static void
ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
				 char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;

	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
			 "ath11k PDEV RX stats");
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "====================");

	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Mid PPDU route change",
			 pdev->mid_ppdu_route_change);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Tot. number of statuses", pdev->status_rcvd);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 0", pdev->r0_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 1", pdev->r1_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 2", pdev->r2_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Extra frags on rings 3", pdev->r3_frags);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to HTT", pdev->htt_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to HTT", pdev->htt_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MSDUs delivered to stack", pdev->loc_msdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDUs delivered to stack", pdev->loc_mpdus);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "Oversized AMSUs", pdev->oversize_amsdu);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors", pdev->phy_errs);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "PHY errors drops", pdev->phy_err_drop);
	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);

	*length = len;
}
static void
ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
			      const struct ath11k_fw_stats_vdev *vdev,
			      char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
	u8 *vif_macaddr;
	int i;

	/* VDEV stats has all the active VDEVs of other PDEVs as well,
	 * ignoring those not part of requested PDEV
	 */
	if (!arvif)
		return;

	vif_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", vdev->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vif_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "beacon snr", vdev->beacon_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "data snr", vdev->data_snr);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx frames", vdev->num_rx_frames);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts fail", vdev->num_rts_fail);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rts success", vdev->num_rts_success);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx err", vdev->num_rx_err);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num rx discard", vdev->num_rx_discard);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "num tx not acked", vdev->num_tx_not_acked);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames", i,
				 vdev->num_tx_frames[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames retries", i,
				 vdev->num_tx_frames_retries[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "num tx frames failures", i,
				 vdev->num_tx_frames_failures[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] 0x%08x\n",
				 "tx rate history", i,
				 vdev->tx_rate_history[i]);

	for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
		len += scnprintf(buf + len, buf_len - len,
				 "%25s [%02d] %u\n",
				 "beacon rssi history", i,
				 vdev->beacon_rssi_history[i]);

	len += scnprintf(buf + len, buf_len - len, "\n");

	*length = len;
}
static void
ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
			     const struct ath11k_fw_stats_bcn *bcn,
			     char *buf, u32 *length)
{
	u32 len = *length;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
	u8 *vdev_macaddr;

	if (!arvif) {
		ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
			    bcn->vdev_id);
		return;
	}

	vdev_macaddr = arvif->vif->addr;

	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "VDEV ID", bcn->vdev_id);
	len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
			 "VDEV MAC address", vdev_macaddr);
	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
			 "================");
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
	len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
			 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);

	len += scnprintf(buf + len, buf_len - len, "\n");

	*length = len;
}
void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
			      struct ath11k_fw_stats *fw_stats,
			      u32 stats_id, char *buf)
{
	u32 len = 0;
	u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
	const struct ath11k_fw_stats_pdev *pdev;
	const struct ath11k_fw_stats_vdev *vdev;
	const struct ath11k_fw_stats_bcn *bcn;
	size_t num_bcn;

	spin_lock_bh(&ar->data_lock);

	if (stats_id == WMI_REQUEST_PDEV_STAT) {
		pdev = list_first_entry_or_null(&fw_stats->pdevs,
						struct ath11k_fw_stats_pdev, list);
		if (!pdev) {
			ath11k_warn(ar->ab, "failed to get pdev stats\n");
			goto unlock;
		}

		ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
		ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_VDEV_STAT) {
		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s\n",
				 "ath11k VDEV stats");
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "=================");

		list_for_each_entry(vdev, &fw_stats->vdevs, list)
			ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
	}

	if (stats_id == WMI_REQUEST_BCN_STAT) {
		num_bcn = ath11k_wmi_fw_stats_num_bcn(&fw_stats->bcn);

		len += scnprintf(buf + len, buf_len - len, "\n");
		len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
				 "ath11k Beacon stats", num_bcn);
		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
				 "===================");

		list_for_each_entry(bcn, &fw_stats->bcn, list)
			ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
	}

unlock:
	spin_unlock_bh(&ar->data_lock);
}
static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
{
	/* try to send pending beacons first. they take priority */
	wake_up(&ab->wmi_ab.tx_credits_wq);
}

static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
				       struct sk_buff *skb)
{
	dev_kfree_skb(skb);
}

static bool ath11k_reg_is_world_alpha(char *alpha)
{
	return alpha[0] == '0' && alpha[1] == '0';
}
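
/* Regulatory channel list event handling: the raw event is pulled into
 * struct cur_regulatory_info, a regdomain is built from it and then either
 * stored as the default regd (before mac registration) or handed to the
 * corresponding ar via new_regd plus regd_update_work once the device is
 * registered. An intersection with the default regd is only requested when
 * neither the default nor the new alpha2 is the world-wide code ("00").
 */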
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct cur_regulatory_info *reg_info = NULL;
	struct ieee80211_regdomain *regd = NULL;
	bool intersect = false;
	int ret = 0, pdev_idx;
	struct ath11k *ar;

	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
	if (!reg_info) {
		ret = -ENOMEM;
		goto fallback;
	}

	ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
	if (ret) {
		ath11k_warn(ab, "failed to extract regulatory info from received event\n");
		goto fallback;
	}

	if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
		/* In case of failure to set the requested ctry,
		 * fw retains the current regd. We print a failure info
		 * and return from here.
		 */
		ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
		goto mem_free;
	}

	pdev_idx = reg_info->phy_id;

	if (pdev_idx >= ab->num_radios) {
		/* Process the event for phy0 only if single_pdev_only
		 * is true. If pdev_idx is valid but not 0, discard the
		 * event. Otherwise, it goes to fallback.
		 */
		if (ab->hw_params.single_pdev_only &&
		    pdev_idx < ab->hw_params.num_rxmda_per_pdev)
			goto mem_free;
		else
			goto fallback;
	}

	/* Avoid multiple overwrites to default regd, during core
	 * stop-start after mac registration.
	 */
	if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
	    !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
		    (char *)reg_info->alpha2, 2))
		goto mem_free;

	/* Intersect new rules with default regd if a new country setting was
	 * requested, i.e a default regd was already set during initialization
	 * and the regd coming from this event has a valid country info.
	 */
	if (ab->default_regd[pdev_idx] &&
	    !ath11k_reg_is_world_alpha((char *)
		ab->default_regd[pdev_idx]->alpha2) &&
	    !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
		intersect = true;

	regd = ath11k_reg_build_regd(ab, reg_info, intersect);
	if (!regd) {
		ath11k_warn(ab, "failed to build regd from reg_info\n");
		goto fallback;
	}

	spin_lock(&ab->base_lock);
	if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
		/* Once mac is registered, ar is valid and all CC events from
		 * fw is considered to be received due to user requests
		 * currently.
		 * Free previously built regd before assigning the newly
		 * generated regd to ar. NULL pointer handling will be
		 * taken care by kfree itself.
		 */
		ar = ab->pdevs[pdev_idx].ar;
		kfree(ab->new_regd[pdev_idx]);
		ab->new_regd[pdev_idx] = regd;
		ieee80211_queue_work(ar->hw, &ar->regd_update_work);
	} else {
		/* Multiple events for the same *ar is not expected. But we
		 * can still clear any previously stored default_regd if we
		 * are receiving this event for the same radio by mistake.
		 * NULL pointer handling will be taken care by kfree itself.
		 */
		kfree(ab->default_regd[pdev_idx]);
		/* This regd would be applied during mac registration */
		ab->default_regd[pdev_idx] = regd;
	}
	ab->dfs_region = reg_info->dfs_region;
	spin_unlock(&ab->base_lock);

	goto mem_free;

fallback:
	/* Fallback to older reg (by sending previous country setting
	 * again if fw has succeded and we failed to process here.
	 * The Regdomain should be uniform across driver and fw. Since the
	 * FW has processed the command and sent a success status, we expect
	 * this function to succeed as well. If it doesn't, CTRY needs to be
	 * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
	 */
	/* TODO: This is rare, but still should also be handled */
	WARN_ON(1);
mem_free:
	if (reg_info) {
		kfree(reg_info->reg_rules_2g_ptr);
		kfree(reg_info->reg_rules_5g_ptr);
		kfree(reg_info);
	}
	return ret;
}
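
/* The READY event parser copies at most sizeof(fixed_param) bytes from the
 * TLV (min_t() below), so a shorter fixed_param from firmware is never
 * over-read; the extra per-pdev MAC addresses are only applied when the
 * firmware provides at least one address per radio.
 */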
static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
				    const void *ptr, void *data)
{
	struct wmi_tlv_rdy_parse *rdy_parse = data;
	struct wmi_ready_event fixed_param;
	struct wmi_mac_addr *addr_list;
	struct ath11k_pdev *pdev;
	u32 num_mac_addr;
	int i;

	switch (tag) {
	case WMI_TAG_READY_EVENT:
		memset(&fixed_param, 0, sizeof(fixed_param));
		memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
		       min_t(u16, sizeof(fixed_param), len));
		ab->wlan_init_status = fixed_param.ready_event_min.status;
		rdy_parse->num_extra_mac_addr =
			fixed_param.ready_event_min.num_extra_mac_addr;

		ether_addr_copy(ab->mac_addr,
				fixed_param.ready_event_min.mac_addr.addr);
		ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
		ab->wmi_ready = true;
		break;
	case WMI_TAG_ARRAY_FIXED_STRUCT:
		addr_list = (struct wmi_mac_addr *)ptr;
		num_mac_addr = rdy_parse->num_extra_mac_addr;

		if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
			break;

		for (i = 0; i < ab->num_radios; i++) {
			pdev = &ab->pdevs[i];
			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
		}
		ab->pdevs_macaddr_valid = true;
		break;
	default:
		break;
	}

	return 0;
}
static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_tlv_rdy_parse rdy_parse = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_rdy_parse, &rdy_parse);
	if (ret) {
		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
		return ret;
	}

	complete(&ab->wmi_ab.unified_ready);
	return 0;
}

static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_delete_resp_event peer_del_resp;
	struct ath11k *ar;

	if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
		ath11k_warn(ab, "failed to extract peer delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
			    peer_del_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->peer_delete_done);
	rcu_read_unlock();
	ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
		   peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
}
static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	struct ath11k *ar;
	u32 vdev_id = 0;

	if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
		ath11k_warn(ab, "failed to extract vdev delete resp");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_delete_done);

	rcu_read_unlock();

	ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev delete resp for vdev id %d\n",
		   vdev_id);
}

static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
{
	switch (vdev_resp_status) {
	case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
		return "invalid vdev id";
	case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
		return "not supported";
	case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
		return "dfs violation";
	case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
		return "invalid regdomain";
	default:
		return "unknown";
	}
}
static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_vdev_start_resp_event vdev_start_resp;
	struct ath11k *ar;
	u32 status;

	if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
		ath11k_warn(ab, "failed to extract vdev start resp");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
			    vdev_start_resp.vdev_id);
		rcu_read_unlock();
		return;
	}

	ar->last_wmi_vdev_start_status = 0;

	status = vdev_start_resp.status;

	if (WARN_ON_ONCE(status)) {
		ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
			    status, ath11k_wmi_vdev_resp_print(status));
		ar->last_wmi_vdev_start_status = status;
	}

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
		   vdev_start_resp.vdev_id);
}

static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	u32 vdev_id, tx_status;

	if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
					 &vdev_id, &tx_status) != 0) {
		ath11k_warn(ab, "failed to extract bcn tx status");
		return;
	}
}

static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	u32 vdev_id = 0;

	if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
		ath11k_warn(ab, "failed to extract vdev stopped event");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
			    vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->vdev_setup_done);

	rcu_read_unlock();

	ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
}
static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct mgmt_rx_event_params rx_ev = {0};
	struct ath11k *ar;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	u16 fc;
	struct ieee80211_supported_band *sband;

	if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
		ath11k_warn(ab, "failed to extract mgmt rx event");
		dev_kfree_skb(skb);
		return;
	}

	memset(status, 0, sizeof(*status));

	ath11k_dbg(ab, ATH11K_DBG_MGMT, "mgmt rx event status %08x\n",
		   rx_ev.status);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);

	if (!ar) {
		ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
			    rx_ev.pdev_id);
		dev_kfree_skb(skb);
		goto exit;
	}

	if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
	    WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
		status->band = NL80211_BAND_6GHZ;
	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
		status->band = NL80211_BAND_2GHZ;
	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
		status->band = NL80211_BAND_5GHZ;
	} else {
		/* Shouldn't happen unless list of advertised channels to
		 * mac80211 has been changed.
		 */
		WARN_ON_ONCE(1);
		dev_kfree_skb(skb);
		goto exit;
	}

	if (rx_ev.phy_mode == MODE_11B &&
	    (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);

	sband = &ar->mac.sbands[status->band];

	status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
						      status->band);
	status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
	status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = le16_to_cpu(hdr->frame_control);

	/* Firmware is guaranteed to report all essential management frames via
	 * WMI while it can deliver some extra via HTT. Since there can be
	 * duplicates split the reporting wrt monitor/sniffing.
	 */
	status->flag |= RX_FLAG_SKIP_MONITOR;

	/* In case of PMF, FW delivers decrypted frames with Protected Bit set.
	 * Don't clear that. Also, FW delivers broadcast management frames
	 * (ex: group privacy action frames in mesh) as encrypted payload.
	 */
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
		status->flag |= RX_FLAG_DECRYPTED;

		if (!ieee80211_is_robust_mgmt_frame(skb)) {
			status->flag |= RX_FLAG_IV_STRIPPED |
					RX_FLAG_MMIC_STRIPPED;
			hdr->frame_control = __cpu_to_le16(fc &
					     ~IEEE80211_FCTL_PROTECTED);
		}
	}

	/* TODO: Pending handle beacon implementation
	 *if (ieee80211_is_beacon(hdr->frame_control))
	 *	ath11k_mac_handle_beacon(ar, skb);
	 */

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
		   skb, skb->len,
		   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
		   status->freq, status->band, status->signal,
		   status->rate_idx);

	ieee80211_rx_ni(ar->hw, skb);

exit:
	rcu_read_unlock();
}
static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
	struct ath11k *ar;

	if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
		ath11k_warn(ab, "failed to extract mgmt tx compl event");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
			    tx_compl_param.pdev_id);
		goto exit;
	}

	wmi_process_mgmt_tx_comp(ar, tx_compl_param.desc_id,
				 tx_compl_param.status);

	ath11k_dbg(ab, ATH11K_DBG_MGMT,
		   "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
		   tx_compl_param.pdev_id, tx_compl_param.desc_id,
		   tx_compl_param.status);

exit:
	rcu_read_unlock();
}

static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
						  u32 vdev_id)
{
	int i;
	struct ath11k_pdev *pdev;
	struct ath11k *ar;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar) {
			ar = pdev->ar;

			spin_lock_bh(&ar->data_lock);
			if (ar->scan.state == ATH11K_SCAN_ABORTING &&
			    ar->scan.vdev_id == vdev_id) {
				spin_unlock_bh(&ar->data_lock);
				return ar;
			}
			spin_unlock_bh(&ar->data_lock);
		}
	}

	return NULL;
}
static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct ath11k *ar;
	struct wmi_scan_event scan_ev = {0};

	if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
		ath11k_warn(ab, "failed to extract scan event");
		return;
	}

	rcu_read_lock();

	/* In case the scan was cancelled, ex. during interface teardown,
	 * the interface will not be found in active interfaces.
	 * Rather, in such scenarios, iterate over the active pdev's to
	 * search 'ar' if the corresponding 'ar' scan is ABORTING and the
	 * aborting scan's vdev id matches this event info.
	 */
	if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
	    scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
		ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
	else
		ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);

	if (!ar) {
		ath11k_warn(ab, "Received scan event for unknown vdev");
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
		   ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
		   scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
		   scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
		   ath11k_scan_state_str(ar->scan.state), ar->scan.state);

	switch (scan_ev.event_type) {
	case WMI_SCAN_EVENT_STARTED:
		ath11k_wmi_event_scan_started(ar);
		break;
	case WMI_SCAN_EVENT_COMPLETED:
		ath11k_wmi_event_scan_completed(ar);
		break;
	case WMI_SCAN_EVENT_BSS_CHANNEL:
		ath11k_wmi_event_scan_bss_chan(ar);
		break;
	case WMI_SCAN_EVENT_FOREIGN_CHAN:
		ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
		break;
	case WMI_SCAN_EVENT_START_FAILED:
		ath11k_warn(ab, "received scan start failure event\n");
		ath11k_wmi_event_scan_start_failed(ar);
		break;
	case WMI_SCAN_EVENT_DEQUEUED:
	case WMI_SCAN_EVENT_PREEMPTED:
	case WMI_SCAN_EVENT_RESTARTED:
	case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
	default:
		break;
	}

	spin_unlock_bh(&ar->data_lock);

	rcu_read_unlock();
}
static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_arg arg = {};
	struct ieee80211_sta *sta;
	struct ath11k_peer *peer;
	struct ath11k *ar;

	if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
		ath11k_warn(ab, "failed to extract peer sta kickout event");
		return;
	}

	rcu_read_lock();

	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);

	if (!peer) {
		ath11k_warn(ab, "peer not found %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
			    peer->vdev_id);
		goto exit;
	}

	sta = ieee80211_find_sta_by_ifaddr(ar->hw,
					   arg.mac_addr, NULL);
	if (!sta) {
		ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
			    arg.mac_addr);
		goto exit;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI, "peer sta kickout event %pM",
		   arg.mac_addr);

	ieee80211_report_low_ack(sta, 10);

exit:
	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_roam_event roam_ev = {};
	struct ath11k *ar;

	if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
		ath11k_warn(ab, "failed to extract roam event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi roam event vdev %u reason 0x%08x rssi %d\n",
		   roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in roam ev %d",
			    roam_ev.vdev_id);
		rcu_read_unlock();
		return;
	}

	if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
		ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);

	switch (roam_ev.reason) {
	case WMI_ROAM_REASON_BEACON_MISS:
		/* TODO: Pending beacon miss and connection_loss_work
		 * implementation
		 * ath11k_mac_handle_beacon_miss(ar, vdev_id);
		 */
		break;
	case WMI_ROAM_REASON_BETTER_AP:
	case WMI_ROAM_REASON_LOW_RSSI:
	case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
	case WMI_ROAM_REASON_HO_FAILED:
		ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
			    roam_ev.reason, roam_ev.vdev_id);
		break;
	}

	rcu_read_unlock();
}
static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_chan_info_event ch_info_ev = {0};
	struct ath11k *ar;
	struct survey_info *survey;
	int idx;
	/* HW channel counters frequency value in hertz */
	u32 cc_freq_hz = ab->cc_freq_hz;

	if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract chan info event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
		   ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
		   ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
		   ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
		   ch_info_ev.mac_clk_mhz);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
		ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
		return;
	}

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in chan info ev %d",
			    ch_info_ev.vdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);

	switch (ar->scan.state) {
	case ATH11K_SCAN_IDLE:
	case ATH11K_SCAN_STARTING:
		ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
		goto exit;
	case ATH11K_SCAN_RUNNING:
	case ATH11K_SCAN_ABORTING:
		break;
	}

	idx = freq_to_idx(ar, ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
			    ch_info_ev.freq, idx);
		goto exit;
	}

	/* If FW provides MAC clock frequency in Mhz, overriding the initialized
	 * HW channel counters frequency value
	 */
	if (ch_info_ev.mac_clk_mhz)
		cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);

	if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
		survey = &ar->survey[idx];
		memset(survey, 0, sizeof(*survey));
		survey->noise = ch_info_ev.noise_floor;
		survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
				 SURVEY_INFO_TIME_BUSY;
		survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
		survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
	}
exit:
	spin_unlock_bh(&ar->data_lock);
	rcu_read_unlock();
}
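
/* WMI_PDEV_BSS_CHAN_INFO_EVENTID handler: the 64-bit cycle counters for the
 * operating channel arrive split into high/low 32-bit words; recombine them
 * and fold the result into the pdev survey entry.
 */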
static void
ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
	struct survey_info *survey;
	struct ath11k *ar;
	u32 cc_freq_hz = ab->cc_freq_hz;
	u64 busy, total, tx, rx, rx_bss;
	int idx;

	if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
		ath11k_warn(ab, "failed to extract pdev bss chan info event");
		return;
	}

	busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
			bss_ch_info_ev.rx_clear_count_low;

	total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
			bss_ch_info_ev.cycle_count_low;

	tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
			bss_ch_info_ev.tx_cycle_count_low;

	rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
			bss_ch_info_ev.rx_cycle_count_low;

	rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
			bss_ch_info_ev.rx_bss_cycle_count_low;

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
		   bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
		   bss_ch_info_ev.noise_floor, busy, total,
		   tx, rx, rx_bss);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);

	if (!ar) {
		ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
			    bss_ch_info_ev.pdev_id);
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&ar->data_lock);
	idx = freq_to_idx(ar, bss_ch_info_ev.freq);
	if (idx >= ARRAY_SIZE(ar->survey)) {
		ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
			    bss_ch_info_ev.freq, idx);
		goto exit;
	}

	survey = &ar->survey[idx];

	survey->noise     = bss_ch_info_ev.noise_floor;
	survey->time      = div_u64(total, cc_freq_hz);
	survey->time_busy = div_u64(busy, cc_freq_hz);
	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
	survey->time_tx   = div_u64(tx, cc_freq_hz);
	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
			     SURVEY_INFO_TIME |
			     SURVEY_INFO_TIME_BUSY |
			     SURVEY_INFO_TIME_RX |
			     SURVEY_INFO_TIME_TX);
exit:
	spin_unlock_bh(&ar->data_lock);
	complete(&ar->bss_survey_done);

	rcu_read_unlock();
}
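
/* WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID handler: record the install status
 * and wake up the thread waiting on install_key_done.
 */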
static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
						struct sk_buff *skb)
{
	struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
	struct ath11k *ar;

	if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
		ath11k_warn(ab, "failed to extract install key compl event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
		   install_key_compl.key_idx, install_key_compl.key_flags,
		   install_key_compl.macaddr, install_key_compl.status);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
			    install_key_compl.vdev_id);
		rcu_read_unlock();
		return;
	}

	ar->install_key_status = 0;

	if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
		ath11k_warn(ab, "install key failed for %pM status %d\n",
			    install_key_compl.macaddr, install_key_compl.status);
		ar->install_key_status = install_key_compl.status;
	}

	complete(&ar->install_key_done);
	rcu_read_unlock();
}
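
/* WMI_SERVICE_AVAILABLE_EVENTID handler: parse the extended service bitmap
 * advertised by firmware and set the corresponding bits in ab->wmi_ab.svc_map.
 */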
static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_service_available_event *ev;
	int ret;
	int i, j;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch svc available ev");
		kfree(tb);
		return;
	}

	/* TODO: Use wmi_service_segment_offset information to get the service
	 * especially when more services are advertised in multiple service
	 * available events.
	 */
	for (i = 0, j = WMI_MAX_SERVICE;
	     i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
	     i++) {
		do {
			if (ev->wmi_service_segment_bitmap[i] &
			    BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
				set_bit(j, ab->wmi_ab.svc_map);
		} while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
		   ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
		   ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);

	kfree(tb);
}
static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
	struct ath11k *ar;

	if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
		ath11k_warn(ab, "failed to extract peer assoc conf event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "peer assoc conf ev vdev id %d macaddr %pM\n",
		   peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);

	rcu_read_lock();
	ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);

	if (!ar) {
		ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
			    peer_assoc_conf.vdev_id);
		rcu_read_unlock();
		return;
	}

	complete(&ar->peer_assoc_done);
	rcu_read_unlock();
}
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	ath11k_debugfs_fw_stats_process(ab, skb);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
 * is not part of BDF CTL(Conformance test limits) table entries.
 */
static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
						 struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_ctl_failsafe_chk_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev ctl failsafe check ev status %d\n",
		   ev->ctl_failsafe_status);

	/* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
	 * to 10 dBm else the CTL power entry in the BDF would be picked up.
	 */
	if (ev->ctl_failsafe_status != 0)
		ath11k_warn(ab, "pdev ctl failsafe failure status %d",
			    ev->ctl_failsafe_status);

	kfree(tb);
}
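
/* Helper for the CSA switch count status event: once firmware has counted
 * the switch count down to zero, finalize CSA on every affected vdev.
 */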
static void
ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
					  const struct wmi_pdev_csa_switch_ev *ev,
					  const u32 *vdev_ids)
{
	int i;
	struct ath11k_vif *arvif;

	/* Finish CSA once the switch count becomes NULL */
	if (ev->current_switch_count)
		return;

	rcu_read_lock();
	for (i = 0; i < ev->num_vdevs; i++) {
		arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
		if (!arvif) {
			ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
				    vdev_ids[i]);
			continue;
		}

		if (arvif->is_up && arvif->vif->csa_active)
			ieee80211_csa_finish(arvif->vif);
	}
	rcu_read_unlock();
}
static void
ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_csa_switch_ev *ev;
	const u32 *vdev_ids;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
	vdev_ids = tb[WMI_TAG_ARRAY_UINT32];

	if (!ev || !vdev_ids) {
		ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev csa switch count %d for pdev %d, num_vdevs %d",
		   ev->current_switch_count, ev->pdev_id,
		   ev->num_vdevs);

	ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);

	kfree(tb);
}
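
/* WMI_PDEV_DFS_RADAR_DETECTION_EVENTID handler: log the radar report and
 * notify mac80211 unless radar events are blocked (debugfs knob).
 */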
static void
ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_pdev_radar_ev *ev;
	struct ath11k *ar;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
		return;
	}

	ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];

	if (!ev) {
		ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
		kfree(tb);
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
		   ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
		   ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
		   ev->freq_offset, ev->sidx);

	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);

	if (!ar) {
		ath11k_warn(ab, "radar detected in invalid pdev %d\n",
			    ev->pdev_id);
		goto exit;
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
		   ev->pdev_id);

	if (ar->dfs_block_radar_events)
		ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
	else
		ieee80211_radar_detected(ar->hw);

exit:
	kfree(tb);
}
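
/* WMI_PDEV_TEMPERATURE_EVENTID handler: forward the reported chip
 * temperature to the thermal subsystem.
 */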
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
				  struct sk_buff *skb)
{
	struct ath11k *ar;
	struct wmi_pdev_temperature_event ev = {0};

	if (ath11k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
		ath11k_warn(ab, "failed to extract pdev temperature event");
		return;
	}

	ath11k_dbg(ab, ATH11K_DBG_WMI,
		   "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);

	ar = ath11k_mac_get_ar_by_pdev_id(ab, ev.pdev_id);
	if (!ar) {
		ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
		return;
	}

	ath11k_thermal_event_temperature(ar, ev.temp);
}
static void ath11k_fils_discovery_event(struct ath11k_base *ab,
					struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_fils_discovery_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab,
			    "failed to parse FILS discovery event tlv %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
	if (!ev) {
		ath11k_warn(ab, "failed to fetch FILS discovery event\n");
		kfree(tb);
		return;
	}

	ath11k_warn(ab,
		    "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
		    ev->vdev_id, ev->fils_tt, ev->tbtt);

	kfree(tb);
}
static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
					      struct sk_buff *skb)
{
	const void **tb;
	const struct wmi_probe_resp_tx_status_event *ev;
	int ret;

	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
	if (IS_ERR(tb)) {
		ret = PTR_ERR(tb);
		ath11k_warn(ab,
			    "failed to parse probe response transmission status event tlv: %d\n",
			    ret);
		return;
	}

	ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
	if (!ev) {
		ath11k_warn(ab,
			    "failed to fetch probe response transmission status event");
		kfree(tb);
		return;
	}

	if (ev->tx_status)
		ath11k_warn(ab,
			    "Probe response transmission failed for vdev_id %u, status %u\n",
			    ev->vdev_id, ev->tx_status);

	kfree(tb);
}
static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
						u16 tag, u16 len,
						const void *ptr, void *data)
{
	struct wmi_wow_ev_arg *ev = data;
	const char *wow_pg_fault;
	int wow_pg_len;

	switch (tag) {
	case WMI_TAG_WOW_EVENT_INFO:
		memcpy(ev, ptr, sizeof(*ev));
		ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
			   ev->wake_reason, wow_reason(ev->wake_reason));
		break;

	case WMI_TAG_ARRAY_BYTE:
		if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
			wow_pg_fault = ptr;
			/* the first 4 bytes are length */
			wow_pg_len = *(int *)wow_pg_fault;
			wow_pg_fault += sizeof(int);
			ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
				   wow_pg_len);
			ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
					"wow_event_info_type packet present",
					"wow_pg_fault ",
					wow_pg_fault,
					wow_pg_len);
		}
		break;
	default:
		break;
	}

	return 0;
}
static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_wow_ev_arg ev = { };
	int ret;

	ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
				  ath11k_wmi_tlv_wow_wakeup_host_parse,
				  &ev);
	if (ret) {
		ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
		return;
	}

	complete(&ab->wow.wakeup_completed);
}
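
/* HTC rx completion for the WMI control endpoint: strip the WMI command
 * header and dispatch the event to its handler. The skb is freed here,
 * except for WMI_MGMT_RX_EVENTID whose handler consumes it.
 */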
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_tlv_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		goto out;

	switch (id) {
		/* Process all the WMI events here */
	case WMI_SERVICE_READY_EVENTID:
		ath11k_service_ready_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT_EVENTID:
		ath11k_service_ready_ext_event(ab, skb);
		break;
	case WMI_SERVICE_READY_EXT2_EVENTID:
		ath11k_service_ready_ext2_event(ab, skb);
		break;
	case WMI_REG_CHAN_LIST_CC_EVENTID:
		ath11k_reg_chan_list_event(ab, skb);
		break;
	case WMI_READY_EVENTID:
		ath11k_ready_event(ab, skb);
		break;
	case WMI_PEER_DELETE_RESP_EVENTID:
		ath11k_peer_delete_resp_event(ab, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath11k_vdev_start_resp_event(ab, skb);
		break;
	case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
		ath11k_bcn_tx_status_event(ab, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath11k_vdev_stopped_event(ab, skb);
		break;
	case WMI_MGMT_RX_EVENTID:
		ath11k_mgmt_rx_event(ab, skb);
		/* mgmt_rx_event() owns the skb now! */
		return;
	case WMI_MGMT_TX_COMPLETION_EVENTID:
		ath11k_mgmt_tx_compl_event(ab, skb);
		break;
	case WMI_SCAN_EVENTID:
		ath11k_scan_event(ab, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath11k_peer_sta_kickout_event(ab, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath11k_roam_event(ab, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath11k_chan_info_event(ab, skb);
		break;
	case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
		ath11k_pdev_bss_chan_info_event(ab, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath11k_vdev_install_key_compl_event(ab, skb);
		break;
	case WMI_SERVICE_AVAILABLE_EVENTID:
		ath11k_service_available_event(ab, skb);
		break;
	case WMI_PEER_ASSOC_CONF_EVENTID:
		ath11k_peer_assoc_conf_event(ab, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath11k_update_stats_event(ab, skb);
		break;
	case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
		ath11k_pdev_ctl_failsafe_check_event(ab, skb);
		break;
	case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
		ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
		break;
	case WMI_PDEV_TEMPERATURE_EVENTID:
		ath11k_wmi_pdev_temperature_event(ab, skb);
		break;
	case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
		ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
		break;
	case WMI_HOST_FILS_DISCOVERY_EVENTID:
		ath11k_fils_discovery_event(ab, skb);
		break;
	case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
		ath11k_probe_resp_tx_status_event(ab, skb);
		break;
		/* add Unsupported events here */
	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
	case WMI_TWT_ENABLE_EVENTID:
	case WMI_TWT_DISABLE_EVENTID:
	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
		ath11k_dbg(ab, ATH11K_DBG_WMI,
			   "ignoring unsupported event 0x%x\n", id);
		break;
	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
		ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
		break;
	case WMI_VDEV_DELETE_RESP_EVENTID:
		ath11k_vdev_delete_resp_event(ab, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath11k_wmi_event_wow_wakeup_host(ab, skb);
		break;
	/* TODO: Add remaining events */
	default:
		ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
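
/* Connect one WMI control service (one per pdev/MAC) to HTC and record the
 * resulting endpoint id and maximum message length for that pdev.
 */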
static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
					   u32 pdev_idx)
{
	int status;
	u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
			 ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };

	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
	conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = svc_id[pdev_idx];

	status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
	if (status) {
		ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
	ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
	ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;

	return 0;
}
static int ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
					 struct wmi_unit_test_cmd ut_cmd,
					 u32 *test_args)
{
	struct ath11k_pdev_wmi *wmi = ar->wmi;
	struct wmi_unit_test_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_tlv *tlv;
	void *ptr;
	u32 *ut_cmd_args;
	int buf_len, arg_len;
	int ret;
	int i;

	arg_len = sizeof(u32) * ut_cmd.num_args;
	buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;

	skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_unit_test_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);

	cmd->vdev_id = ut_cmd.vdev_id;
	cmd->module_id = ut_cmd.module_id;
	cmd->num_args = ut_cmd.num_args;
	cmd->diag_token = ut_cmd.diag_token;

	ptr = skb->data + sizeof(ut_cmd);

	tlv = ptr;
	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
		      FIELD_PREP(WMI_TLV_LEN, arg_len);

	ptr += TLV_HDR_SIZE;

	ut_cmd_args = ptr;
	for (i = 0; i < ut_cmd.num_args; i++)
		ut_cmd_args[i] = test_args[i];

	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);

	if (ret) {
		ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
			    ret);
		dev_kfree_skb(skb);
	}

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
		   "WMI unit test : module %d vdev %d n_args %d token %d\n",
		   cmd->module_id, cmd->vdev_id, cmd->num_args,
		   cmd->diag_token);

	return ret;
}
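
/* Inject a simulated radar pulse via the DFS unit test module on the first
 * started AP vdev; typically triggered from debugfs to exercise the DFS path.
 */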
int ath11k_wmi_simulate_radar(struct ath11k *ar)
{
	struct ath11k_vif *arvif;
	u32 dfs_args[DFS_MAX_TEST_ARGS];
	struct wmi_unit_test_cmd wmi_ut;
	bool arvif_found = false;

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
			arvif_found = true;
			break;
		}
	}

	if (!arvif_found)
		return -EINVAL;

	dfs_args[DFS_TEST_CMDID] = 0;
	dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
	/* Currently we could pass segment_id(b0 - b1), chirp(b2)
	 * freq offset (b3 - b10) to unit test. For simulation
	 * purpose this can be set to 0 which is valid.
	 */
	dfs_args[DFS_TEST_RADAR_PARAM] = 0;

	wmi_ut.vdev_id = arvif->vdev_id;
	wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
	wmi_ut.num_args = DFS_MAX_TEST_ARGS;
	wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;

	ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");

	return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
}
int ath11k_wmi_connect(struct ath11k_base *ab)
{
	u32 i;
	u8 wmi_ep_count;

	wmi_ep_count = ab->htc.wmi_ep_count;
	if (wmi_ep_count > ab->hw_params.max_radios)
		return -1;

	for (i = 0; i < wmi_ep_count; i++)
		ath11k_connect_pdev_htc_service(ab, i);

	return 0;
}
static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
{
	if (WARN_ON(pdev_id >= MAX_RADIOS))
		return;

	/* TODO: Deinit any pdev specific wmi resource */
}
int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
			   u8 pdev_id)
{
	struct ath11k_pdev_wmi *wmi_handle;

	if (pdev_id >= ab->hw_params.max_radios)
		return -EINVAL;

	wmi_handle = &ab->wmi_ab.wmi[pdev_id];

	wmi_handle->wmi_ab = &ab->wmi_ab;

	/* TODO: Init remaining resource specific to pdev */

	return 0;
}
int ath11k_wmi_attach(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_wmi_pdev_attach(ab, 0);
	if (ret)
		return ret;

	ab->wmi_ab.ab = ab;
	ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;

	/* It's overwritten when service_ext_ready is handled */
	if (ab->hw_params.single_pdev_only)
		ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;

	/* TODO: Init remaining wmi soc resources required */
	init_completion(&ab->wmi_ab.service_ready);
	init_completion(&ab->wmi_ab.unified_ready);

	return 0;
}
void ath11k_wmi_detach(struct ath11k_base *ab)
{
	int i;

	/* TODO: Deinit wmi resource specific to SOC as required */

	for (i = 0; i < ab->htc.wmi_ep_count; i++)
		ath11k_wmi_pdev_detach(ab, i);

	ath11k_wmi_free_dbring_caps(ab);
}
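
/* WoW command helpers: wow_host_wakeup_ind tells firmware that the host is
 * awake again after resume, wow_enable puts firmware into WoW mode on suspend.
 */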
int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
{
	struct wmi_wow_host_wakeup_ind *cmd;
	struct sk_buff *skb;
	size_t len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
				     WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow host wakeup ind\n");

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
}
int ath11k_wmi_wow_enable(struct ath11k *ar)
{
	struct wmi_wow_enable_cmd *cmd;
	struct sk_buff *skb;
	int len;

	len = sizeof(*cmd);
	skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_wow_enable_cmd *)skb->data;
	cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);

	cmd->enable = 1;
	cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
	ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi tlv wow enable\n");

	return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}