/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/ctype.h>
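
/* Each supported firmware branch (main, 10.x, 10.2) numbers its WMI
 * commands and parameters differently. The maps below translate the
 * driver's abstract command/parameter identifiers into the IDs a given
 * firmware expects; WMI_CMD_UNSUPPORTED and WMI_*_PARAM_UNSUPPORTED mark
 * features a branch does not provide, and ath10k_wmi_cmd_send() rejects
 * such commands at runtime.
 */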
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
        .init_cmdid = WMI_INIT_CMDID,
        .start_scan_cmdid = WMI_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
        .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
        .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
        .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
        .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
        .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                                WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                                WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                                WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                                WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                                WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                                WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
        .network_list_offload_config_cmdid =
                                WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
        .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
        .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
        .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
        .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
        .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
        .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
        .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
        .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
        .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
        .echo_cmdid = WMI_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
        .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
        .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
        .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
        .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
};
/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
        .init_cmdid = WMI_10X_INIT_CMDID,
        .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold =
                                WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
        .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                                WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                                WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                                WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                                WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                                WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid =
                                WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                                WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid =
                                WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
        .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
        .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
        .echo_cmdid = WMI_10X_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
        .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
        .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
        .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
        .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
        .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
        .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
        .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
        .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
        .preamble = WMI_VDEV_PARAM_PREAMBLE,
        .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
        .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
        .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
        .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
        .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
        .wmi_vdev_oc_scheduler_air_time_limit =
                                WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
        .wds = WMI_VDEV_PARAM_WDS,
        .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
        .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
        .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
        .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
        .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
        .chwidth = WMI_VDEV_PARAM_CHWIDTH,
        .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
        .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
        .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
        .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
        .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
        .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
        .sgi = WMI_VDEV_PARAM_SGI,
        .ldpc = WMI_VDEV_PARAM_LDPC,
        .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
        .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
        .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
        .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
        .nss = WMI_VDEV_PARAM_NSS,
        .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
        .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
        .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
        .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
        .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
        .ap_keepalive_min_idle_inactive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_idle_inactive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_unresponsive_time_secs =
                        WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
        .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
        .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
        .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
        .txbf = WMI_VDEV_PARAM_TXBF,
        .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
        .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
        .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                                        WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
        .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
        .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
        .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
        .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
        .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
        .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
        .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
        .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
        .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
        .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
        .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
        .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
        .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
        .wmi_vdev_oc_scheduler_air_time_limit =
                                WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
        .wds = WMI_10X_VDEV_PARAM_WDS,
        .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
        .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
        .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
        .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
        .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
        .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
        .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
        .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
        .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
        .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
        .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
        .sgi = WMI_10X_VDEV_PARAM_SGI,
        .ldpc = WMI_10X_VDEV_PARAM_LDPC,
        .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
        .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
        .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
        .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
        .nss = WMI_10X_VDEV_PARAM_NSS,
        .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
        .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
        .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
        .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
        .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
        .ap_keepalive_min_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_idle_inactive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
        .ap_keepalive_max_unresponsive_time_secs =
                WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
        .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
        .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
        .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
        .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
        .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
        .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
        .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
        .ap_detect_out_of_sync_sleeping_sta_time_secs =
                WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
        .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
        .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
        .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
        .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
        .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
        .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
        .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
        .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
        .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
        .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
        .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
        .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
        .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
        .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
        .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
        .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
        .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
        .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
        .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
        .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
        .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
        .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
        .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
        .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
        .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
        .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
        .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
        .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
        .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
        .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
        .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
        .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
        .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
        .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
        .dcs = WMI_PDEV_PARAM_DCS,
        .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
        .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
        .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
        .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
        .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
        .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
        .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
        .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
        .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
        .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
        .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
        .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
        .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
        .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
        .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
        .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
        .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
        .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
        .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
        .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
        .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
        .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
        .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
        .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
        .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
        .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
        .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
        .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
        .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
        .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
        .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
        .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
        .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
        .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
        .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
        .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
        .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
        .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
        .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
        .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
        .bcnflt_stats_update_period =
                                WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
        .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
        .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
        .dcs = WMI_10X_PDEV_PARAM_DCS,
        .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
        .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
        .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
        .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
        .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
        .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
        .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
        .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
        .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
        .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
        .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
        .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
/* firmware 10.2 specific mappings */
static struct wmi_cmd_map wmi_10_2_cmd_map = {
        .init_cmdid = WMI_10_2_INIT_CMDID,
        .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
        .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
        .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
        .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
        .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
        .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
        .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
        .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
        .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
        .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
        .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
        .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
        .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
        .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
        .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
        .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
        .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
        .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
        .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
        .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
        .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
        .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
        .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
        .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
        .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
        .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
        .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
        .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
        .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
        .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
        .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
        .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
        .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
        .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
        .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
        .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
        .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
        .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
        .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
        .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
        .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
        .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
        .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
        .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
        .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
        .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
        .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
        .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
        .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
        .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
        .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
        .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
        .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
        .roam_scan_rssi_change_threshold =
                                WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
        .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
        .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
        .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
        .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
        .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
        .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
        .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
        .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
        .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
        .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
        .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
        .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
        .wlan_profile_set_hist_intvl_cmdid =
                                WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
        .wlan_profile_get_profile_data_cmdid =
                                WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
        .wlan_profile_enable_profile_id_cmdid =
                                WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
        .wlan_profile_list_profile_id_cmdid =
                                WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
        .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
        .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
        .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
        .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
        .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
        .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
        .wow_enable_disable_wake_event_cmdid =
                                WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
        .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
        .wow_hostwakeup_from_sleep_cmdid =
                                WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
        .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
        .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
        .vdev_spectral_scan_configure_cmdid =
                                WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
        .vdev_spectral_scan_enable_cmdid =
                                WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
        .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
        .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
        .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
        .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
        .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
        .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
        .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
        .echo_cmdid = WMI_10_2_ECHO_CMDID,
        .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
        .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
        .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
        .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
        .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
        .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
        .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
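
/* Helper converting the host-order channel description in struct
 * wmi_channel_arg into the little-endian struct wmi_channel layout that
 * the firmware consumes.
 */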
static void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
                                       const struct wmi_channel_arg *arg)
{
        u32 flags = 0;

        memset(ch, 0, sizeof(*ch));

        if (arg->passive)
                flags |= WMI_CHAN_FLAG_PASSIVE;
        if (arg->allow_ibss)
                flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
        if (arg->allow_ht)
                flags |= WMI_CHAN_FLAG_ALLOW_HT;
        if (arg->allow_vht)
                flags |= WMI_CHAN_FLAG_ALLOW_VHT;
        if (arg->ht40plus)
                flags |= WMI_CHAN_FLAG_HT40_PLUS;
        if (arg->chan_radar)
                flags |= WMI_CHAN_FLAG_DFS;

        ch->mhz = __cpu_to_le32(arg->freq);
        ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
        ch->band_center_freq2 = 0;
        ch->min_power = arg->min_power;
        ch->max_power = arg->max_power;
        ch->reg_power = arg->max_reg_power;
        ch->antenna_max = arg->max_antenna_gain;

        /* mode & flags share storage */
        ch->mode = arg->mode;
        ch->flags |= __cpu_to_le32(flags);
}
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
        int ret;

        ret = wait_for_completion_timeout(&ar->wmi.service_ready,
                                          WMI_SERVICE_READY_TIMEOUT_HZ);
        return ret;
}

int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
{
        int ret;

        ret = wait_for_completion_timeout(&ar->wmi.unified_ready,
                                          WMI_UNIFIED_READY_TIMEOUT_HZ);
        return ret;
}
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
{
        struct sk_buff *skb;
        u32 round_len = roundup(len, 4);

        skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
        if (!skb)
                return NULL;

        skb_reserve(skb, WMI_SKB_HEADROOM);
        if (!IS_ALIGNED((unsigned long)skb->data, 4))
                ath10k_warn(ar, "Unaligned WMI skb\n");

        skb_put(skb, round_len);
        memset(skb->data, 0, round_len);

        return skb;
}
static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
        dev_kfree_skb(skb);
}
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
                                      u32 cmd_id)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
        struct wmi_cmd_hdr *cmd_hdr;
        int ret;
        u32 cmd = 0;

        if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
                return -ENOMEM;

        cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);

        cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
        cmd_hdr->cmd_id = __cpu_to_le32(cmd);

        memset(skb_cb, 0, sizeof(*skb_cb));
        ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
        trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);

        if (ret)
                goto err_pull;

        return 0;

err_pull:
        skb_pull(skb, sizeof(struct wmi_cmd_hdr));
        return ret;
}
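
/* Beacons are sent by reference over WMI without waiting for tx credits;
 * the helpers below are invoked whenever credits are replenished so that
 * pending beacons go out before queued commands.
 */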
static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
{
        int ret;

        lockdep_assert_held(&arvif->ar->data_lock);

        if (arvif->beacon == NULL)
                return;

        if (arvif->beacon_sent)
                return;

        ret = ath10k_wmi_beacon_send_ref_nowait(arvif);
        if (ret)
                return;

        /* We need to retain the arvif->beacon reference for DMA unmapping and
         * freeing the skbuff later. */
        arvif->beacon_sent = true;
}
static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
                                       struct ieee80211_vif *vif)
{
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);

        ath10k_wmi_tx_beacon_nowait(arvif);
}

static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
{
        spin_lock_bh(&ar->data_lock);
        ieee80211_iterate_active_interfaces_atomic(ar->hw,
                                                   IEEE80211_IFACE_ITER_NORMAL,
                                                   ath10k_wmi_tx_beacons_iter,
                                                   NULL);
        spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
{
        /* try to send pending beacons first. they take priority */
        ath10k_wmi_tx_beacons_nowait(ar);

        wake_up(&ar->wmi.tx_credits_wq);
}
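
/* Blocking command submission: waits on the tx-credits waitqueue,
 * retrying ath10k_wmi_cmd_send_nowait() (and flushing pending beacons)
 * until the command is accepted, the firmware has crashed, or the wait
 * times out. The skb is freed on failure.
 */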
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
{
        int ret = -EOPNOTSUPP;

        if (cmd_id == WMI_CMD_UNSUPPORTED) {
                ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
                            cmd_id);
                return ret;
        }

        wait_event_timeout(ar->wmi.tx_credits_wq, ({
                /* try to send pending beacons first. they take priority */
                ath10k_wmi_tx_beacons_nowait(ar);

                ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);

                if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
                        ret = -ESHUTDOWN;

                (ret != -EAGAIN);
        }), 3*HZ);

        if (ret)
                dev_kfree_skb_any(skb);

        return ret;
}
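
/* Management frames are transmitted through the WMI management-tx
 * command rather than the regular data path: the frame is copied into a
 * wmi_mgmt_tx_cmd buffer, extra room is reserved for the CCMP MIC on
 * protected robust frames, and the frame is acknowledged back to
 * mac80211 as soon as the command is accepted.
 */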
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
        struct wmi_mgmt_tx_cmd *cmd;
        struct ieee80211_hdr *hdr;
        struct sk_buff *wmi_skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        int len;
        int ret;
        u32 buf_len = skb->len;
        u16 fc;

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = le16_to_cpu(hdr->frame_control);

        if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
                return -EINVAL;

        len = sizeof(cmd->hdr) + skb->len;

        if ((ieee80211_is_action(hdr->frame_control) ||
             ieee80211_is_deauth(hdr->frame_control) ||
             ieee80211_is_disassoc(hdr->frame_control)) &&
             ieee80211_has_protected(hdr->frame_control)) {
                len += IEEE80211_CCMP_MIC_LEN;
                buf_len += IEEE80211_CCMP_MIC_LEN;
        }

        len = round_up(len, 4);

        wmi_skb = ath10k_wmi_alloc_skb(ar, len);
        if (!wmi_skb)
                return -ENOMEM;

        cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;

        cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
        cmd->hdr.tx_rate = 0;
        cmd->hdr.tx_power = 0;
        cmd->hdr.buf_len = __cpu_to_le32(buf_len);

        ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
        memcpy(cmd->buf, skb->data, skb->len);

        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
                   wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
                   fc & IEEE80211_FCTL_STYPE);
        trace_ath10k_tx_hdr(ar, skb->data, skb->len);
        trace_ath10k_tx_payload(ar, skb->data, skb->len);

        /* Send the management frame buffer to the target */
        ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
        if (ret)
                return ret;

        /* TODO: report tx status to mac80211 - temporary just ACK */
        info->flags |= IEEE80211_TX_STAT_ACK;
        ieee80211_tx_status_irqsafe(ar->hw, skb);

        return ret;
}
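
/* The scan event handlers below run with ar->data_lock held, drive the
 * driver's scan state machine (ar->scan.state) and warn when an event
 * arrives in a state it is not expected in.
 */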
static void ath10k_wmi_event_scan_started(struct ath10k *ar)
{
        lockdep_assert_held(&ar->data_lock);

        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
                ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
                            ath10k_scan_state_str(ar->scan.state),
                            ar->scan.state);
                break;
        case ATH10K_SCAN_STARTING:
                ar->scan.state = ATH10K_SCAN_RUNNING;

                if (ar->scan.is_roc)
                        ieee80211_ready_on_channel(ar->hw);

                complete(&ar->scan.started);
                break;
        }
}

static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
{
        lockdep_assert_held(&ar->data_lock);

        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
                /* One suspected reason scan can be completed while starting is
                 * if firmware fails to deliver all scan events to the host,
                 * e.g. when transport pipe is full. This has been observed
                 * with spectral scan phyerr events starving wmi transport
                 * pipe. In such case the "scan completed" event should be (and
                 * is) ignored by the host as it may be just firmware's scan
                 * state machine recovering.
                 */
                ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
                            ath10k_scan_state_str(ar->scan.state),
                            ar->scan.state);
                break;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
                __ath10k_scan_finish(ar);
                break;
        }
}
static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
{
        lockdep_assert_held(&ar->data_lock);

        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
                ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
                            ath10k_scan_state_str(ar->scan.state),
                            ar->scan.state);
                break;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
                ar->scan_channel = NULL;
                break;
        }
}

static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
{
        lockdep_assert_held(&ar->data_lock);

        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
                ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
                            ath10k_scan_state_str(ar->scan.state),
                            ar->scan.state);
                break;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
                ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);

                if (ar->scan.is_roc && ar->scan.roc_freq == freq)
                        complete(&ar->scan.on_channel);
                break;
        }
}
static const char *
ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
                               enum wmi_scan_completion_reason reason)
{
        switch (type) {
        case WMI_SCAN_EVENT_STARTED:
                return "started";
        case WMI_SCAN_EVENT_COMPLETED:
                switch (reason) {
                case WMI_SCAN_REASON_COMPLETED:
                        return "completed";
                case WMI_SCAN_REASON_CANCELLED:
                        return "completed [cancelled]";
                case WMI_SCAN_REASON_PREEMPTED:
                        return "completed [preempted]";
                case WMI_SCAN_REASON_TIMEDOUT:
                        return "completed [timedout]";
                case WMI_SCAN_REASON_MAX:
                        break;
                }
                return "completed [unknown]";
        case WMI_SCAN_EVENT_BSS_CHANNEL:
                return "bss channel";
        case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
                return "foreign channel";
        case WMI_SCAN_EVENT_DEQUEUED:
                return "dequeued";
        case WMI_SCAN_EVENT_PREEMPTED:
                return "preempted";
        case WMI_SCAN_EVENT_START_FAILED:
                return "start failed";
        default:
                return "unknown";
        }
}
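
/* Parses WMI_SCAN_EVENTID payloads and dispatches them to the scan state
 * handlers above under ar->data_lock.
 */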
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
        struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
        enum wmi_scan_event_type event_type;
        enum wmi_scan_completion_reason reason;
        u32 freq, req_id, scan_id, vdev_id;

        event_type = __le32_to_cpu(event->event_type);
        reason = __le32_to_cpu(event->reason);
        freq = __le32_to_cpu(event->channel_freq);
        req_id = __le32_to_cpu(event->scan_req_id);
        scan_id = __le32_to_cpu(event->scan_id);
        vdev_id = __le32_to_cpu(event->vdev_id);

        spin_lock_bh(&ar->data_lock);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
                   ath10k_wmi_event_scan_type_str(event_type, reason),
                   event_type, reason, freq, req_id, scan_id, vdev_id,
                   ath10k_scan_state_str(ar->scan.state), ar->scan.state);

        switch (event_type) {
        case WMI_SCAN_EVENT_STARTED:
                ath10k_wmi_event_scan_started(ar);
                break;
        case WMI_SCAN_EVENT_COMPLETED:
                ath10k_wmi_event_scan_completed(ar);
                break;
        case WMI_SCAN_EVENT_BSS_CHANNEL:
                ath10k_wmi_event_scan_bss_chan(ar);
                break;
        case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
                ath10k_wmi_event_scan_foreign_chan(ar, freq);
                break;
        case WMI_SCAN_EVENT_START_FAILED:
                ath10k_warn(ar, "received scan start failure event\n");
                break;
        case WMI_SCAN_EVENT_DEQUEUED:
        case WMI_SCAN_EVENT_PREEMPTED:
        default:
                break;
        }

        spin_unlock_bh(&ar->data_lock);

        return 0;
}
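
/* Helpers mapping firmware phy_mode and rate codes to the mac80211 band
 * and rate-table index used when reporting received frames.
 */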
static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
{
        enum ieee80211_band band;

        switch (phy_mode) {
        case MODE_11A:
        case MODE_11NA_HT20:
        case MODE_11NA_HT40:
        case MODE_11AC_VHT20:
        case MODE_11AC_VHT40:
        case MODE_11AC_VHT80:
                band = IEEE80211_BAND_5GHZ;
                break;
        case MODE_11G:
        case MODE_11B:
        case MODE_11GONLY:
        case MODE_11NG_HT20:
        case MODE_11NG_HT40:
        case MODE_11AC_VHT20_2G:
        case MODE_11AC_VHT40_2G:
        case MODE_11AC_VHT80_2G:
        default:
                band = IEEE80211_BAND_2GHZ;
        }

        return band;
}

static inline u8 get_rate_idx(u32 rate, enum ieee80211_band band)
{
        u8 rate_idx = 0;

        /* ... rate code to rate table index mapping ... */

        if (band == IEEE80211_BAND_5GHZ) {
                /* Omit CCK rates */
                if (rate_idx > 3)
                        rate_idx -= 4;
                else
                        rate_idx = 0;
        }

        return rate_idx;
}
/* If keys are configured, HW decrypts all frames
 * with protected bit set. Mark such frames as decrypted.
 */
static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
                                         struct sk_buff *skb,
                                         struct ieee80211_rx_status *status)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        unsigned int hdrlen;
        bool peer_key;
        u8 *addr, keyidx;

        if (!ieee80211_is_auth(hdr->frame_control) ||
            !ieee80211_has_protected(hdr->frame_control))
                return;

        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
                return;

        keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
        addr = ieee80211_get_SA(hdr);

        spin_lock_bh(&ar->data_lock);
        peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
        spin_unlock_bh(&ar->data_lock);

        if (peer_key) {
                ath10k_dbg(ar, ATH10K_DBG_MAC,
                           "mac wep key present for peer %pM\n", addr);
                status->flag |= RX_FLAG_DECRYPTED;
        }
}
static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
{
        struct wmi_mgmt_rx_event_v1 *ev_v1;
        struct wmi_mgmt_rx_event_v2 *ev_v2;
        struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_hdr *hdr;
        u32 rx_status;
        u32 channel;
        u32 phy_mode;
        u32 snr;
        u32 rate;
        u32 buf_len;
        u16 fc;
        int pull_len;

        if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
                ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
                ev_hdr = &ev_v2->hdr.v1;
                pull_len = sizeof(*ev_v2);
        } else {
                ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
                ev_hdr = &ev_v1->hdr;
                pull_len = sizeof(*ev_v1);
        }

        channel = __le32_to_cpu(ev_hdr->channel);
        buf_len = __le32_to_cpu(ev_hdr->buf_len);
        rx_status = __le32_to_cpu(ev_hdr->status);
        snr = __le32_to_cpu(ev_hdr->snr);
        phy_mode = __le32_to_cpu(ev_hdr->phy_mode);
        rate = __le32_to_cpu(ev_hdr->rate);

        memset(status, 0, sizeof(*status));

        ath10k_dbg(ar, ATH10K_DBG_MGMT,
                   "event mgmt rx status %08x\n", rx_status);

        if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (rx_status & WMI_RX_STATUS_ERR_DECRYPT) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (rx_status & WMI_RX_STATUS_ERR_KEY_CACHE_MISS) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (rx_status & WMI_RX_STATUS_ERR_CRC) {
                dev_kfree_skb(skb);
                return 0;
        }

        if (rx_status & WMI_RX_STATUS_ERR_MIC)
                status->flag |= RX_FLAG_MMIC_ERROR;

        /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
         * MODE_11B. This means phy_mode is not a reliable source for the band
         * of mgmt rx.
         */
        if (channel >= 1 && channel <= 14) {
                status->band = IEEE80211_BAND_2GHZ;
        } else if (channel >= 36 && channel <= 165) {
                status->band = IEEE80211_BAND_5GHZ;
        } else {
                /* Shouldn't happen unless list of advertised channels to
                 * mac80211 has been changed.
                 */
                WARN_ON_ONCE(1);
                dev_kfree_skb(skb);
                return 0;
        }

        if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
                ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");

        status->freq = ieee80211_channel_to_frequency(channel, status->band);
        status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
        status->rate_idx = get_rate_idx(rate, status->band);

        skb_pull(skb, pull_len);

        hdr = (struct ieee80211_hdr *)skb->data;
        fc = le16_to_cpu(hdr->frame_control);

        ath10k_wmi_handle_wep_reauth(ar, skb, status);

        /* FW delivers WEP Shared Auth frame with Protected Bit set and
         * encrypted payload. However in case of PMF it delivers decrypted
         * frames with Protected Bit set. */
        if (ieee80211_has_protected(hdr->frame_control) &&
            !ieee80211_is_auth(hdr->frame_control)) {
                status->flag |= RX_FLAG_DECRYPTED;

                if (!ieee80211_is_action(hdr->frame_control) &&
                    !ieee80211_is_deauth(hdr->frame_control) &&
                    !ieee80211_is_disassoc(hdr->frame_control)) {
                        status->flag |= RX_FLAG_IV_STRIPPED |
                                        RX_FLAG_MMIC_STRIPPED;
                        hdr->frame_control = __cpu_to_le16(fc &
                                        ~IEEE80211_FCTL_PROTECTED);
                }
        }

        ath10k_dbg(ar, ATH10K_DBG_MGMT,
                   "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
                   skb, skb->len,
                   fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);

        ath10k_dbg(ar, ATH10K_DBG_MGMT,
                   "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
                   status->freq, status->band, status->signal,
                   status->rate_idx);

        /*
         * packets from HTC come aligned to 4byte boundaries
         * because they can originally come in along with a trailer
         */
        skb_trim(skb, buf_len);

        ieee80211_rx(ar->hw, skb);

        return 0;
}
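
/* Maps a channel center frequency to a flat index across the wiphy's
 * per-band channel arrays; used to address the ar->survey table.
 */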
static int freq_to_idx(struct ath10k *ar, int freq)
{
        struct ieee80211_supported_band *sband;
        int band, ch, idx = 0;

        for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
                sband = ar->hw->wiphy->bands[band];
                if (!sband)
                        continue;

                for (ch = 0; ch < sband->n_channels; ch++, idx++)
                        if (sband->channels[ch].center_freq == freq)
                                goto exit;
        }

exit:
        return idx;
}
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
        struct wmi_chan_info_event *ev;
        struct survey_info *survey;
        u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
        int idx;

        ev = (struct wmi_chan_info_event *)skb->data;

        err_code = __le32_to_cpu(ev->err_code);
        freq = __le32_to_cpu(ev->freq);
        cmd_flags = __le32_to_cpu(ev->cmd_flags);
        noise_floor = __le32_to_cpu(ev->noise_floor);
        rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
        cycle_count = __le32_to_cpu(ev->cycle_count);

        ath10k_dbg(ar, ATH10K_DBG_WMI,
                   "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
                   err_code, freq, cmd_flags, noise_floor, rx_clear_count,
                   cycle_count);

        spin_lock_bh(&ar->data_lock);

        switch (ar->scan.state) {
        case ATH10K_SCAN_IDLE:
        case ATH10K_SCAN_STARTING:
                ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
                goto exit;
        case ATH10K_SCAN_RUNNING:
        case ATH10K_SCAN_ABORTING:
                break;
        }

        idx = freq_to_idx(ar, freq);
        if (idx >= ARRAY_SIZE(ar->survey)) {
                ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
                            freq, idx);
                goto exit;
        }

        if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
                /* During scanning chan info is reported twice for each
                 * visited channel. The reported cycle count is global
                 * and per-channel cycle count must be calculated */

                cycle_count -= ar->survey_last_cycle_count;
                rx_clear_count -= ar->survey_last_rx_clear_count;

                survey = &ar->survey[idx];
                survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
                survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
                survey->noise = noise_floor;
                survey->filled = SURVEY_INFO_CHANNEL_TIME |
                                 SURVEY_INFO_CHANNEL_TIME_RX |
                                 SURVEY_INFO_NOISE_DBM;
        }

        ar->survey_last_rx_clear_count = rx_clear_count;
        ar->survey_last_cycle_count = cycle_count;

exit:
        spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
{
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ECHO_EVENTID\n");
}

static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
{
        ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
                   skb->len);

        trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);

        return 0;
}
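
/* Firmware statistics arrive as little-endian WMI structures; the pull
 * helpers below convert them into host-order ath10k_fw_stats entries
 * that the debugfs code consumes.
 */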
static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
                                       struct ath10k_fw_stats_pdev *dst)
{
        const struct wal_dbg_tx_stats *tx = &src->wal.tx;
        const struct wal_dbg_rx_stats *rx = &src->wal.rx;

        dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
        dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
        dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
        dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
        dst->cycle_count = __le32_to_cpu(src->cycle_count);
        dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
        dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);

        dst->comp_queued = __le32_to_cpu(tx->comp_queued);
        dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
        dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
        dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
        dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
        dst->local_enqued = __le32_to_cpu(tx->local_enqued);
        dst->local_freed = __le32_to_cpu(tx->local_freed);
        dst->hw_queued = __le32_to_cpu(tx->hw_queued);
        dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
        dst->underrun = __le32_to_cpu(tx->underrun);
        dst->tx_abort = __le32_to_cpu(tx->tx_abort);
        dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
        dst->tx_ko = __le32_to_cpu(tx->tx_ko);
        dst->data_rc = __le32_to_cpu(tx->data_rc);
        dst->self_triggers = __le32_to_cpu(tx->self_triggers);
        dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
        dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
        dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
        dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
        dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
        dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
        dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);

        dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
        dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
        dst->r0_frags = __le32_to_cpu(rx->r0_frags);
        dst->r1_frags = __le32_to_cpu(rx->r1_frags);
        dst->r2_frags = __le32_to_cpu(rx->r2_frags);
        dst->r3_frags = __le32_to_cpu(rx->r3_frags);
        dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
        dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
        dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
        dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
        dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
        dst->phy_errs = __le32_to_cpu(rx->phy_errs);
        dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
        dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
}
static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
                                       struct ath10k_fw_stats_peer *dst)
{
        ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
        dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
        dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
                                         struct sk_buff *skb,
                                         struct ath10k_fw_stats *stats)
{
        const struct wmi_stats_event *ev = (void *)skb->data;
        u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
        int i;

        if (!skb_pull(skb, sizeof(*ev)))
                return -EPROTO;

        num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
        num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

        for (i = 0; i < num_pdev_stats; i++) {
                const struct wmi_pdev_stats *src;
                struct ath10k_fw_stats_pdev *dst;

                src = (void *)skb->data;
                if (!skb_pull(skb, sizeof(*src)))
                        return -EPROTO;

                dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
                if (!dst)
                        continue;

                ath10k_wmi_pull_pdev_stats(src, dst);
                list_add_tail(&dst->list, &stats->pdevs);
        }

        /* fw doesn't implement vdev stats */

        for (i = 0; i < num_peer_stats; i++) {
                const struct wmi_peer_stats *src;
                struct ath10k_fw_stats_peer *dst;

                src = (void *)skb->data;
                if (!skb_pull(skb, sizeof(*src)))
                        return -EPROTO;

                dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
                if (!dst)
                        continue;

                ath10k_wmi_pull_peer_stats(src, dst);
                list_add_tail(&dst->list, &stats->peers);
        }

        return 0;
}
static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
                                        struct sk_buff *skb,
                                        struct ath10k_fw_stats *stats)
{
        const struct wmi_stats_event *ev = (void *)skb->data;
        u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
        int i;

        if (!skb_pull(skb, sizeof(*ev)))
                return -EPROTO;

        num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
        num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
        num_peer_stats = __le32_to_cpu(ev->num_peer_stats);

        for (i = 0; i < num_pdev_stats; i++) {
                const struct wmi_10x_pdev_stats *src;
                struct ath10k_fw_stats_pdev *dst;

                src = (void *)skb->data;
                if (!skb_pull(skb, sizeof(*src)))
                        return -EPROTO;

                dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
                if (!dst)
                        continue;

                ath10k_wmi_pull_pdev_stats(&src->old, dst);

                dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
                dst->rts_bad = __le32_to_cpu(src->rts_bad);
                dst->rts_good = __le32_to_cpu(src->rts_good);
                dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
                dst->no_beacons = __le32_to_cpu(src->no_beacons);
                dst->mib_int_count = __le32_to_cpu(src->mib_int_count);

                list_add_tail(&dst->list, &stats->pdevs);
        }

        /* fw doesn't implement vdev stats */

        for (i = 0; i < num_peer_stats; i++) {
                const struct wmi_10x_peer_stats *src;
                struct ath10k_fw_stats_peer *dst;

                src = (void *)skb->data;
                if (!skb_pull(skb, sizeof(*src)))
                        return -EPROTO;

                dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
                if (!dst)
                        continue;

                ath10k_wmi_pull_peer_stats(&src->old, dst);

                dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);

                list_add_tail(&dst->list, &stats->peers);
        }

        return 0;
}
int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
                             struct ath10k_fw_stats *stats)
{
        if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
                return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);

        return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
}

static void ath10k_wmi_event_update_stats(struct ath10k *ar,
                                          struct sk_buff *skb)
{
        ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
        ath10k_debug_fw_stats_process(ar, skb);
}
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
					     struct sk_buff *skb)
{
	struct wmi_vdev_start_response_event *ev;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");

	ev = (struct wmi_vdev_start_response_event *)skb->data;

	if (WARN_ON(__le32_to_cpu(ev->status)))
		return;

	complete(&ar->vdev_setup_done);
}
static void ath10k_wmi_event_vdev_stopped(struct ath10k *ar,
					  struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
	complete(&ar->vdev_setup_done);
}
static void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar,
					      struct sk_buff *skb)
{
	struct wmi_peer_sta_kickout_event *ev;
	struct ieee80211_sta *sta;

	ev = (struct wmi_peer_sta_kickout_event *)skb->data;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
		   ev->peer_macaddr.addr);

	rcu_read_lock();

	sta = ieee80211_find_sta_by_ifaddr(ar->hw, ev->peer_macaddr.addr, NULL);
	if (!sta) {
		ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
			    ev->peer_macaddr.addr);
		goto exit;
	}

	ieee80211_report_low_ack(sta, 10);

exit:
	rcu_read_unlock();
}
/* We don't report to mac80211 sleep state of connected
 * stations. Due to this mac80211 can't fill in TIM IE
 * correctly.
 *
 * I know of no way of getting nullfunc frames that contain
 * sleep transition from connected stations - these do not
 * seem to be sent from the target to the host. There also
 * doesn't seem to be a dedicated event for that. So the
 * only way left to do this would be to read tim_bitmap
 * during SWBA.
 *
 * We could probably try using tim_bitmap from SWBA to tell
 * mac80211 which stations are asleep and which are not. The
 * problem here is calling mac80211 functions so many times
 * could take too long and make us miss the time to submit
 * the beacon to the target.
 *
 * So as a workaround we try to extend the TIM IE if there
 * is unicast buffered for stations with aid > 7 and fill it
 * in ourselves.
 */
static void ath10k_wmi_update_tim(struct ath10k *ar,
				  struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
	struct ieee80211_tim_ie *tim;
	u8 *ies, *ie;
	u8 ie_len, pvm_len;
	__le32 t;
	u32 v;

	/* if next SWBA has no tim_changed the tim_bitmap is garbage.
	 * we must copy the bitmap upon change and reuse it later */
	if (__le32_to_cpu(bcn_info->tim_info.tim_changed)) {
		int i;

		BUILD_BUG_ON(sizeof(arvif->u.ap.tim_bitmap) !=
			     sizeof(bcn_info->tim_info.tim_bitmap));

		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++) {
			t = bcn_info->tim_info.tim_bitmap[i / 4];
			v = __le32_to_cpu(t);
			arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
		}

		/* FW reports either length 0 or 16
		 * so we calculate this on our own */
		arvif->u.ap.tim_len = 0;
		for (i = 0; i < sizeof(arvif->u.ap.tim_bitmap); i++)
			if (arvif->u.ap.tim_bitmap[i])
				arvif->u.ap.tim_len = i;

		arvif->u.ap.tim_len++;
	}

	ies = bcn->data;
	ies += ieee80211_hdrlen(hdr->frame_control);
	ies += 12; /* fixed parameters */

	ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
				    (u8 *)skb_tail_pointer(bcn) - ies);
	if (!ie) {
		if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
			ath10k_warn(ar, "no tim ie found;\n");
		return;
	}

	tim = (void *)ie + 2;
	ie_len = ie[1];
	pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */

	if (pvm_len < arvif->u.ap.tim_len) {
		int expand_size = sizeof(arvif->u.ap.tim_bitmap) - pvm_len;
		int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
		void *next_ie = ie + 2 + ie_len;

		if (skb_put(bcn, expand_size)) {
			memmove(next_ie + expand_size, next_ie, move_size);

			ie[1] += expand_size;
			ie_len += expand_size;
			pvm_len += expand_size;
		} else {
			ath10k_warn(ar, "tim expansion failed\n");
		}
	}

	if (pvm_len > sizeof(arvif->u.ap.tim_bitmap)) {
		ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
		return;
	}

	tim->bitmap_ctrl = !!__le32_to_cpu(bcn_info->tim_info.tim_mcast);
	memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);

	if (tim->dtim_count == 0) {
		ATH10K_SKB_CB(bcn)->bcn.dtim_zero = true;

		if (__le32_to_cpu(bcn_info->tim_info.tim_mcast) == 1)
			ATH10K_SKB_CB(bcn)->bcn.deliver_cab = true;
	}

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
		   tim->dtim_count, tim->dtim_period,
		   tim->bitmap_ctrl, pvm_len);
}
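/* P2P NoA (notice of absence) handling: build the NoA attribute for
 * beacons of P2P GO vdevs from the NoA info delivered with each SWBA
 * event.
 */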
static void ath10k_p2p_fill_noa_ie(u8 *data, u32 len,
				   struct wmi_p2p_noa_info *noa)
{
	struct ieee80211_p2p_noa_attr *noa_attr;
	u8 ctwindow_oppps = noa->ctwindow_oppps;
	u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
	bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
	__le16 *noa_attr_len;
	u16 attr_len;
	u8 noa_descriptors = noa->num_descriptors;
	int i;

	/* P2P IE */
	data[0] = WLAN_EID_VENDOR_SPECIFIC;
	data[1] = len - 2;
	data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
	data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
	data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
	data[5] = WLAN_OUI_TYPE_WFA_P2P;

	/* NOA ATTR */
	data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
	noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
	noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];

	noa_attr->index = noa->index;
	noa_attr->oppps_ctwindow = ctwindow;
	if (oppps)
		noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;

	for (i = 0; i < noa_descriptors; i++) {
		noa_attr->desc[i].count =
			__le32_to_cpu(noa->descriptors[i].type_count);
		noa_attr->desc[i].duration = noa->descriptors[i].duration;
		noa_attr->desc[i].interval = noa->descriptors[i].interval;
		noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
	}

	attr_len = 2; /* index + oppps_ctwindow */
	attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
	*noa_attr_len = __cpu_to_le16(attr_len);
}
static u32 ath10k_p2p_calc_noa_ie_len(struct wmi_p2p_noa_info *noa)
{
	u32 len = 0;
	u8 noa_descriptors = noa->num_descriptors;
	u8 opp_ps_info = noa->ctwindow_oppps;
	bool opps_enabled = !!(opp_ps_info & WMI_P2P_OPPPS_ENABLE_BIT);

	if (!noa_descriptors && !opps_enabled)
		return len;

	len += 1 + 1 + 4; /* EID + len + OUI */
	len += 1 + 2; /* noa attr + attr len */
	len += 1 + 1; /* index + oppps_ctwindow */
	len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);

	return len;
}
static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
				  struct sk_buff *bcn,
				  struct wmi_bcn_info *bcn_info)
{
	struct wmi_p2p_noa_info *noa = &bcn_info->p2p_noa_info;
	u8 *new_data, *old_data = arvif->u.ap.noa_data;
	u32 new_len;

	if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
		return;

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
	if (noa->changed & WMI_P2P_NOA_CHANGED_BIT) {
		new_len = ath10k_p2p_calc_noa_ie_len(noa);
		if (!new_len)
			goto cleanup;

		new_data = kmalloc(new_len, GFP_ATOMIC);
		if (!new_data)
			goto cleanup;

		ath10k_p2p_fill_noa_ie(new_data, new_len, noa);

		spin_lock_bh(&ar->data_lock);
		arvif->u.ap.noa_data = new_data;
		arvif->u.ap.noa_len = new_len;
		spin_unlock_bh(&ar->data_lock);
		kfree(old_data);
	}

	if (arvif->u.ap.noa_data)
		if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
			memcpy(skb_put(bcn, arvif->u.ap.noa_len),
			       arvif->u.ap.noa_data,
			       arvif->u.ap.noa_len);
	return;

cleanup:
	spin_lock_bh(&ar->data_lock);
	arvif->u.ap.noa_data = NULL;
	arvif->u.ap.noa_len = 0;
	spin_unlock_bh(&ar->data_lock);
	kfree(old_data);
}
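/* SWBA (software beacon alert): for every vdev flagged in the event's
 * vdev map, fetch a fresh beacon from mac80211, patch in the TIM and
 * P2P NoA information reported by the firmware and hand the frame
 * over for transmission.
 */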
static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_host_swba_event *ev;
	u32 map;
	int i = -1;
	struct wmi_bcn_info *bcn_info;
	struct ath10k_vif *arvif;
	struct sk_buff *bcn;
	dma_addr_t paddr;
	int ret, vdev_id = 0;

	ev = (struct wmi_host_swba_event *)skb->data;
	map = __le32_to_cpu(ev->vdev_map);

	ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
		   ev->vdev_map);

	for (; map; map >>= 1, vdev_id++) {
		if (!(map & 0x1))
			continue;

		i++;

		if (i >= WMI_MAX_AP_VDEV) {
			ath10k_warn(ar, "swba has corrupted vdev map\n");
			break;
		}

		bcn_info = &ev->bcn_info[i];

		ath10k_dbg(ar, ATH10K_DBG_MGMT,
			   "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
			   i,
			   __le32_to_cpu(bcn_info->tim_info.tim_len),
			   __le32_to_cpu(bcn_info->tim_info.tim_mcast),
			   __le32_to_cpu(bcn_info->tim_info.tim_changed),
			   __le32_to_cpu(bcn_info->tim_info.tim_num_ps_pending),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[3]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[2]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[1]),
			   __le32_to_cpu(bcn_info->tim_info.tim_bitmap[0]));

		arvif = ath10k_get_arvif(ar, vdev_id);
		if (arvif == NULL) {
			ath10k_warn(ar, "no vif for vdev_id %d found\n",
				    vdev_id);
			continue;
		}

		/* There are no completions for beacons so wait for next SWBA
		 * before telling mac80211 to decrement CSA counter
		 *
		 * Once CSA counter is completed stop sending beacons until
		 * actual channel switch is done */
		if (arvif->vif->csa_active &&
		    ieee80211_csa_is_complete(arvif->vif)) {
			ieee80211_csa_finish(arvif->vif);
			continue;
		}

		bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
		if (!bcn) {
			ath10k_warn(ar, "could not get mac80211 beacon\n");
			continue;
		}

		ath10k_tx_h_seq_no(arvif->vif, bcn);
		ath10k_wmi_update_tim(ar, arvif, bcn, bcn_info);
		ath10k_wmi_update_noa(ar, arvif, bcn, bcn_info);

		spin_lock_bh(&ar->data_lock);

		if (arvif->beacon) {
			if (!arvif->beacon_sent)
				ath10k_warn(ar, "SWBA overrun on vdev %d\n",
					    arvif->vdev_id);

			ath10k_mac_vif_beacon_free(arvif);
		}

		if (!arvif->beacon_buf) {
			paddr = dma_map_single(arvif->ar->dev, bcn->data,
					       bcn->len, DMA_TO_DEVICE);
			ret = dma_mapping_error(arvif->ar->dev, paddr);
			if (ret) {
				ath10k_warn(ar, "failed to map beacon: %d\n",
					    ret);
				dev_kfree_skb_any(bcn);
				goto skip;
			}

			ATH10K_SKB_CB(bcn)->paddr = paddr;
		} else {
			if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
				ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
					    bcn->len, IEEE80211_MAX_FRAME_LEN);
				skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
			}
			memcpy(arvif->beacon_buf, bcn->data, bcn->len);
			ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
		}

		arvif->beacon = bcn;
		arvif->beacon_sent = false;

		trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
		trace_ath10k_tx_payload(ar, bcn->data, bcn->len);

		ath10k_wmi_tx_beacon_nowait(arvif);
skip:
		spin_unlock_bh(&ar->data_lock);
	}
}
static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
				    const struct wmi_phyerr *phyerr,
				    const struct phyerr_radar_report *rr,
				    u64 tsf)
{
	u32 reg0, reg1, tsf32l;
	struct pulse_event pe;
	u64 tsf64;
	u8 rssi, width;

	reg0 = __le32_to_cpu(rr->reg0);
	reg1 = __le32_to_cpu(rr->reg1);

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
		   MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
		   MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
		   MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
		   MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
		   MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));

	if (!ar->dfs_detector)
		return;

	/* report event to DFS pattern detector */
	tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
	tsf64 = tsf & (~0xFFFFFFFFULL);
	tsf64 |= tsf32l;

	width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
	rssi = phyerr->rssi_combined;

	/* hardware store this as 8 bit signed value,
	 * set to zero if negative number */
	if (rssi & 0x80)
		rssi = 0;

	pe.ts = tsf64;
	pe.freq = ar->hw->conf.chandef.chan->center_freq;
	pe.width = width;
	pe.rssi = rssi;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
		   pe.freq, pe.width, pe.rssi, pe.ts);

	ATH10K_DFS_STAT_INC(ar, pulses_detected);

	if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe)) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "dfs no pulse pattern detected, yet\n");
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
	ATH10K_DFS_STAT_INC(ar, radar_detected);

	/* Control radar events reporting in debugfs file
	   dfs_block_radar_events */
	if (ar->dfs_block_radar_events) {
		ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
		return;
	}

	ieee80211_radar_detected(ar->hw);
}
static int ath10k_dfs_fft_report(struct ath10k *ar,
				 const struct wmi_phyerr *phyerr,
				 const struct phyerr_fft_report *fftr,
				 u64 tsf)
{
	u32 reg0, reg1;
	u8 rssi, peak_mag;

	reg0 = __le32_to_cpu(fftr->reg0);
	reg1 = __le32_to_cpu(fftr->reg1);
	rssi = phyerr->rssi_combined;

	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
		   MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
		   MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
		   MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
		   MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));

	peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);

	/* false event detection */
	if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
	    peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
		ATH10K_DFS_STAT_INC(ar, pulses_discarded);
		return -EINVAL;
	}

	return 0;
}
static void ath10k_wmi_event_dfs(struct ath10k *ar,
				 const struct wmi_phyerr *phyerr,
				 u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	const struct phyerr_tlv *tlv;
	const struct phyerr_radar_report *rr;
	const struct phyerr_fft_report *fftr;
	const u8 *tlv_buf;

	buf_len = __le32_to_cpu(phyerr->buf_len);
	ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
		   "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
		   phyerr->phy_err_code, phyerr->rssi_combined,
		   __le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);

	/* Skip event if DFS disabled */
	if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
		return;

	ATH10K_DFS_STAT_INC(ar, pulses_total);

	while (i < buf_len) {
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "too short buf for tlv header (%d)\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
		ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
			   "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
			   tlv_len, tlv->tag, tlv->sig);

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
			if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
				ath10k_warn(ar, "too short radar pulse summary (%d)\n",
					    i);
				return;
			}

			rr = (struct phyerr_radar_report *)tlv_buf;
			ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
			break;
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
				ath10k_warn(ar, "too short fft report (%d)\n",
					    i);
				return;
			}

			fftr = (struct phyerr_fft_report *)tlv_buf;
			res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
			if (res)
				return;
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}
static void
ath10k_wmi_event_spectral_scan(struct ath10k *ar,
			       const struct wmi_phyerr *phyerr,
			       u64 tsf)
{
	int buf_len, tlv_len, res, i = 0;
	struct phyerr_tlv *tlv;
	const void *tlv_buf;
	const struct phyerr_fft_report *fftr;
	size_t fftr_len;

	buf_len = __le32_to_cpu(phyerr->buf_len);

	while (i < buf_len) {
		if (i + sizeof(*tlv) > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
				    i);
			return;
		}

		tlv = (struct phyerr_tlv *)&phyerr->buf[i];
		tlv_len = __le16_to_cpu(tlv->len);
		tlv_buf = &phyerr->buf[i + sizeof(*tlv)];

		if (i + sizeof(*tlv) + tlv_len > buf_len) {
			ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
				    i);
			return;
		}

		switch (tlv->tag) {
		case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
			if (sizeof(*fftr) > tlv_len) {
				ath10k_warn(ar, "failed to parse fft report at byte %d\n",
					    i);
				return;
			}

			fftr_len = tlv_len - sizeof(*fftr);
			fftr = tlv_buf;
			res = ath10k_spectral_process_fft(ar, phyerr,
							  fftr, fftr_len,
							  tsf);
			if (res < 0) {
				ath10k_warn(ar, "failed to process fft report: %d\n",
					    res);
				return;
			}
			break;
		}

		i += sizeof(*tlv) + tlv_len;
	}
}
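/* WMI_PHYERR_EVENTID carries a combined header followed by a variable
 * number of individual phyerr records; each record is dispatched to
 * the DFS and/or spectral scan parsers based on its error code.
 */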
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
	const struct wmi_phyerr_event *ev;
	const struct wmi_phyerr *phyerr;
	u32 count, i, buf_len, phy_err_code;
	u64 tsf;
	int left_len = skb->len;

	ATH10K_DFS_STAT_INC(ar, phy_errors);

	/* Check if combined event available */
	if (left_len < sizeof(*ev)) {
		ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
		return;
	}

	left_len -= sizeof(*ev);

	/* Check number of included events */
	ev = (const struct wmi_phyerr_event *)skb->data;
	count = __le32_to_cpu(ev->num_phyerrs);

	tsf = __le32_to_cpu(ev->tsf_u32);
	tsf <<= 32;
	tsf |= __le32_to_cpu(ev->tsf_l32);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event phyerr count %d tsf64 0x%llX\n",
		   count, tsf);

	phyerr = ev->phyerrs;
	for (i = 0; i < count; i++) {
		/* Check if we can read event header */
		if (left_len < sizeof(*phyerr)) {
			ath10k_warn(ar, "single event (%d) wrong head len\n",
				    i);
			return;
		}

		left_len -= sizeof(*phyerr);

		buf_len = __le32_to_cpu(phyerr->buf_len);
		phy_err_code = phyerr->phy_err_code;

		if (left_len < buf_len) {
			ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
			return;
		}

		left_len -= buf_len;

		switch (phy_err_code) {
		case PHY_ERROR_RADAR:
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			break;
		case PHY_ERROR_SPECTRAL_SCAN:
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		case PHY_ERROR_FALSE_RADAR_EXT:
			ath10k_wmi_event_dfs(ar, phyerr, tsf);
			ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
			break;
		default:
			break;
		}

		phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
	}
}
static void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_ROAM_EVENTID\n");
}

static void ath10k_wmi_event_profile_match(struct ath10k *ar,
					   struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
}

static void ath10k_wmi_event_debug_print(struct ath10k *ar,
					 struct sk_buff *skb)
{
	char buf[101], c;
	int i;

	for (i = 0; i < sizeof(buf) - 1; i++) {
		if (i >= skb->len)
			break;

		c = skb->data[i];

		if (c == '\0')
			break;

		if (isascii(c) && isprint(c))
			buf[i] = c;
		else
			break;
	}

	if (i == sizeof(buf) - 1)
		ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);

	/* for some reason the debug prints end with \n, remove that */
	if (skb->data[i - 1] == '\n')
		i--;

	/* the last byte is always reserved for the null character */
	buf[i] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
}
static void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
}

static void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar,
						struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
}

static void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
						     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
						     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_rtt_error_report(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
}

static void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WOW_WAKEUP_HOST_EVENTID\n");
}

static void ath10k_wmi_event_dcs_interference(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
}

static void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_TPC_CONFIG_EVENTID\n");
}

static void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar,
					    struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
}

static void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
						 struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
}

static void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
}

static void ath10k_wmi_event_delba_complete(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_addba_complete(struct ath10k *ar,
					     struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
							struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}

static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}

static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
					       struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}

static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
					      struct sk_buff *skb)
{
	ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
				     u32 num_units, u32 unit_len)
{
	dma_addr_t paddr;
	u32 pool_size;
	int idx = ar->wmi.num_mem_chunks;

	pool_size = num_units * round_up(unit_len, 4);

	ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
							   pool_size,
							   &paddr,
							   GFP_ATOMIC);
	if (!ar->wmi.mem_chunks[idx].vaddr) {
		ath10k_warn(ar, "failed to allocate memory chunk\n");
		return -ENOMEM;
	}

	memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);

	ar->wmi.mem_chunks[idx].paddr = paddr;
	ar->wmi.mem_chunks[idx].len = pool_size;
	ar->wmi.mem_chunks[idx].req_id = req_id;
	ar->wmi.num_mem_chunks++;

	return 0;
}
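/* Service ready parsing: the main and 10.x firmware branches use
 * slightly different event layouts, so each gets its own pull helper
 * filling a common wmi_svc_rdy_ev_arg.
 */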
static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
					   struct wmi_svc_rdy_ev_arg *arg)
{
	struct wmi_service_ready_event *ev;
	size_t i, n;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	ev = (void *)skb->data;
	skb_pull(skb, sizeof(*ev));
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->sw_version;
	arg->sw_ver1 = ev->sw_version_1;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = ev->wmi_service_bitmap;
	arg->service_map_len = sizeof(ev->wmi_service_bitmap);

	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
		  ARRAY_SIZE(arg->mem_reqs));
	for (i = 0; i < n; i++)
		arg->mem_reqs[i] = &ev->mem_reqs[i];

	if (skb->len <
	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
		return -EPROTO;

	return 0;
}
static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
					  struct wmi_svc_rdy_ev_arg *arg)
{
	struct wmi_10x_service_ready_event *ev;
	size_t i, n;

	if (skb->len < sizeof(*ev))
		return -EPROTO;

	ev = (void *)skb->data;
	skb_pull(skb, sizeof(*ev));
	arg->min_tx_power = ev->hw_min_tx_power;
	arg->max_tx_power = ev->hw_max_tx_power;
	arg->ht_cap = ev->ht_cap_info;
	arg->vht_cap = ev->vht_cap_info;
	arg->sw_ver0 = ev->sw_version;
	arg->phy_capab = ev->phy_capability;
	arg->num_rf_chains = ev->num_rf_chains;
	arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
	arg->num_mem_reqs = ev->num_mem_reqs;
	arg->service_map = ev->wmi_service_bitmap;
	arg->service_map_len = sizeof(ev->wmi_service_bitmap);

	n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
		  ARRAY_SIZE(arg->mem_reqs));
	for (i = 0; i < n; i++)
		arg->mem_reqs[i] = &ev->mem_reqs[i];

	if (skb->len <
	    __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
		return -EPROTO;

	return 0;
}
static void ath10k_wmi_event_service_ready(struct ath10k *ar,
					   struct sk_buff *skb)
{
	struct wmi_svc_rdy_ev_arg arg = {};
	u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
	int ret;

	memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
		wmi_10x_svc_map(arg.service_map, ar->wmi.svc_map,
				arg.service_map_len);
	} else {
		ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
		wmi_main_svc_map(arg.service_map, ar->wmi.svc_map,
				 arg.service_map_len);
	}

	if (ret) {
		ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
		return;
	}

	ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
	ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
	ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
	ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
	ar->fw_version_major =
		(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
	ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
	ar->fw_version_release =
		(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
	ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
	ar->phy_capability = __le32_to_cpu(arg.phy_capab);
	ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
	ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);

	ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
			arg.service_map, arg.service_map_len);

	/* only manually set fw features when not using FW IE format */
	if (ar->fw_api == 1 && ar->fw_version_build > 636)
		set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);

	if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
		ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
			    ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
		ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
	}

	ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
	ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;

	if (strlen(ar->hw->wiphy->fw_version) == 0) {
		snprintf(ar->hw->wiphy->fw_version,
			 sizeof(ar->hw->wiphy->fw_version),
			 "%u.%u.%u.%u",
			 ar->fw_version_major,
			 ar->fw_version_minor,
			 ar->fw_version_release,
			 ar->fw_version_build);
	}

	num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
	if (num_mem_reqs > WMI_MAX_MEM_REQS) {
		ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
			    num_mem_reqs);
		return;
	}

	for (i = 0; i < num_mem_reqs; ++i) {
		req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
		num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
		unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
		num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);

		if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
			/* number of units to allocate is number of
			 * peers, 1 extra for self peer on target */
			/* this needs to be tied, host and target
			 * can get out of sync */
			num_units = TARGET_10X_NUM_PEERS + 1;
		else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
			num_units = TARGET_10X_NUM_VDEVS + 1;

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
			   req_id,
			   __le32_to_cpu(arg.mem_reqs[i]->num_units),
			   num_unit_info,
			   unit_size,
			   num_units);

		ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
						unit_size);
		if (ret)
			return;
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
		   __le32_to_cpu(arg.min_tx_power),
		   __le32_to_cpu(arg.max_tx_power),
		   __le32_to_cpu(arg.ht_cap),
		   __le32_to_cpu(arg.vht_cap),
		   __le32_to_cpu(arg.sw_ver0),
		   __le32_to_cpu(arg.sw_ver1),
		   __le32_to_cpu(arg.phy_capab),
		   __le32_to_cpu(arg.num_rf_chains),
		   __le32_to_cpu(arg.eeprom_rd),
		   __le32_to_cpu(arg.num_mem_reqs));

	complete(&ar->wmi.service_ready);
}
static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;

	if (WARN_ON(skb->len < sizeof(*ev)))
		return -EINVAL;

	ether_addr_copy(ar->mac_addr, ev->mac_addr.addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d skb->len %i ev-sz %zu\n",
		   __le32_to_cpu(ev->sw_version),
		   __le32_to_cpu(ev->abi_version),
		   ev->mac_addr.addr,
		   __le32_to_cpu(ev->status), skb->len, sizeof(*ev));

	complete(&ar->wmi.unified_ready);
	return 0;
}
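/* WMI RX dispatch: the event id is taken from the wmi_cmd_hdr and
 * switched on per firmware branch (main, 10.x, 10.2). Except for
 * mgmt_rx, which takes ownership of the skb, the buffer is freed at
 * the end of each handler.
 */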
static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_PDEV_FTM_INTG_EVENTID:
		ath10k_wmi_event_pdev_ftm_intg(ar, skb);
		break;
	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
		ath10k_wmi_event_gtk_offload_status(ar, skb);
		break;
	case WMI_GTK_REKEY_FAIL_EVENTID:
		ath10k_wmi_event_gtk_rekey_fail(ar, skb);
		break;
	case WMI_TX_DELBA_COMPLETE_EVENTID:
		ath10k_wmi_event_delba_complete(ar, skb);
		break;
	case WMI_TX_ADDBA_COMPLETE_EVENTID:
		ath10k_wmi_event_addba_complete(ar, skb);
		break;
	case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
		ath10k_wmi_event_vdev_install_key_complete(ar, skb);
		break;
	case WMI_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10x_event_id id;
	bool consumed;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	consumed = ath10k_tm_event_wmi(ar, id, skb);

	/* Ready event must be handled normally also in UTF mode so that we
	 * know the UTF firmware has booted, others we are just bypass WMI
	 * events to testmode.
	 */
	if (consumed && id != WMI_10X_READY_EVENTID) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi testmode consumed 0x%x\n", id);
		goto out;
	}

	switch (id) {
	case WMI_10X_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10X_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10X_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10X_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10X_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10X_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10X_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10X_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10X_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10X_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10X_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10X_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10X_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10X_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10X_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10X_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10X_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10X_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10X_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10X_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10X_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10X_PDEV_UTF_EVENTID:
		/* ignore utf events */
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

out:
	dev_kfree_skb(skb);
}
static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	struct wmi_cmd_hdr *cmd_hdr;
	enum wmi_10_2_event_id id;

	cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
	id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);

	if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
		return;

	trace_ath10k_wmi_event(ar, id, skb->data, skb->len);

	switch (id) {
	case WMI_10_2_MGMT_RX_EVENTID:
		ath10k_wmi_event_mgmt_rx(ar, skb);
		/* mgmt_rx() owns the skb now! */
		return;
	case WMI_10_2_SCAN_EVENTID:
		ath10k_wmi_event_scan(ar, skb);
		break;
	case WMI_10_2_CHAN_INFO_EVENTID:
		ath10k_wmi_event_chan_info(ar, skb);
		break;
	case WMI_10_2_ECHO_EVENTID:
		ath10k_wmi_event_echo(ar, skb);
		break;
	case WMI_10_2_DEBUG_MESG_EVENTID:
		ath10k_wmi_event_debug_mesg(ar, skb);
		break;
	case WMI_10_2_UPDATE_STATS_EVENTID:
		ath10k_wmi_event_update_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_START_RESP_EVENTID:
		ath10k_wmi_event_vdev_start_resp(ar, skb);
		break;
	case WMI_10_2_VDEV_STOPPED_EVENTID:
		ath10k_wmi_event_vdev_stopped(ar, skb);
		break;
	case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
		ath10k_wmi_event_peer_sta_kickout(ar, skb);
		break;
	case WMI_10_2_HOST_SWBA_EVENTID:
		ath10k_wmi_event_host_swba(ar, skb);
		break;
	case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
		ath10k_wmi_event_tbttoffset_update(ar, skb);
		break;
	case WMI_10_2_PHYERR_EVENTID:
		ath10k_wmi_event_phyerr(ar, skb);
		break;
	case WMI_10_2_ROAM_EVENTID:
		ath10k_wmi_event_roam(ar, skb);
		break;
	case WMI_10_2_PROFILE_MATCH:
		ath10k_wmi_event_profile_match(ar, skb);
		break;
	case WMI_10_2_DEBUG_PRINT_EVENTID:
		ath10k_wmi_event_debug_print(ar, skb);
		break;
	case WMI_10_2_PDEV_QVIT_EVENTID:
		ath10k_wmi_event_pdev_qvit(ar, skb);
		break;
	case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
		ath10k_wmi_event_wlan_profile_data(ar, skb);
		break;
	case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_rtt_measurement_report(ar, skb);
		break;
	case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
		ath10k_wmi_event_tsf_measurement_report(ar, skb);
		break;
	case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
		ath10k_wmi_event_rtt_error_report(ar, skb);
		break;
	case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
		ath10k_wmi_event_wow_wakeup_host(ar, skb);
		break;
	case WMI_10_2_DCS_INTERFERENCE_EVENTID:
		ath10k_wmi_event_dcs_interference(ar, skb);
		break;
	case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
		ath10k_wmi_event_pdev_tpc_config(ar, skb);
		break;
	case WMI_10_2_INST_RSSI_STATS_EVENTID:
		ath10k_wmi_event_inst_rssi_stats(ar, skb);
		break;
	case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
		ath10k_wmi_event_vdev_standby_req(ar, skb);
		break;
	case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
		ath10k_wmi_event_vdev_resume_req(ar, skb);
		break;
	case WMI_10_2_SERVICE_READY_EVENTID:
		ath10k_wmi_event_service_ready(ar, skb);
		break;
	case WMI_10_2_READY_EVENTID:
		ath10k_wmi_event_ready(ar, skb);
		break;
	case WMI_10_2_RTT_KEEPALIVE_EVENTID:
	case WMI_10_2_GPIO_INPUT_EVENTID:
	case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
	case WMI_10_2_GENERIC_BUFFER_EVENTID:
	case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
	case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
	case WMI_10_2_WDS_PEER_EVENTID:
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "received event id %d not implemented\n", id);
		break;
	default:
		ath10k_warn(ar, "Unknown eventid: %d\n", id);
		break;
	}

	dev_kfree_skb(skb);
}
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
			ath10k_wmi_10_2_process_rx(ar, skb);
		else
			ath10k_wmi_10x_process_rx(ar, skb);
	} else {
		ath10k_wmi_main_process_rx(ar, skb);
	}
}
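/* Register the WMI control service with HTC; RX completions are
 * routed to ath10k_wmi_process_rx() above.
 */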
int ath10k_wmi_connect(struct ath10k *ar)
{
	int status;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	/* these fields are the same for all service endpoints */
	conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
	conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;

	/* connect to control service */
	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;

	status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
			    status);
		return status;
	}

	ar->wmi.eid = conn_resp.eid;
	return 0;
}
static int ath10k_wmi_main_pdev_set_regdomain(struct ath10k *ar, u16 rd,
					      u16 rd2g, u16 rd5g, u16 ctl2g,
					      u16 ctl5g)
{
	struct wmi_pdev_set_regdomain_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
static int ath10k_wmi_10x_pdev_set_regdomain(struct ath10k *ar, u16 rd,
					     u16 rd2g, u16 rd5g,
					     u16 ctl2g, u16 ctl5g,
					     enum wmi_dfs_region dfs_reg)
{
	struct wmi_pdev_set_regdomain_cmd_10x *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
	cmd->reg_domain = __cpu_to_le32(rd);
	cmd->reg_domain_2G = __cpu_to_le32(rd2g);
	cmd->reg_domain_5G = __cpu_to_le32(rd5g);
	cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
	cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
	cmd->dfs_domain = __cpu_to_le32(dfs_reg);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
		   rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
				  u16 rd5g, u16 ctl2g, u16 ctl5g,
				  enum wmi_dfs_region dfs_reg)
{
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
		return ath10k_wmi_10x_pdev_set_regdomain(ar, rd, rd2g, rd5g,
							 ctl2g, ctl5g, dfs_reg);

	return ath10k_wmi_main_pdev_set_regdomain(ar, rd, rd2g, rd5g,
						  ctl2g, ctl5g);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct wmi_pdev_suspend_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
	cmd->suspend_opt = __cpu_to_le32(suspend_opt);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, 0);
	if (!skb)
		return -ENOMEM;

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct wmi_pdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
		ath10k_warn(ar, "pdev param %d not supported by firmware\n",
			    id);
		return -EOPNOTSUPP;
	}

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
	cmd->param_id = __cpu_to_le32(id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
		   id, value);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
					   struct wmi_host_mem_chunks *chunks)
{
	struct host_memory_chunk *chunk;
	int i;

	chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);

	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		chunk = &chunks->items[i];
		chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
		chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
		chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);

		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "wmi chunk %d len %d requested, addr 0x%llx\n",
			   i,
			   ar->wmi.mem_chunks[i].len,
			   (unsigned long long)ar->wmi.mem_chunks[i].paddr);
	}
}
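/* The WMI init command carries the target resource configuration plus
 * the list of host memory chunks allocated during service ready; one
 * variant exists per firmware branch.
 */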
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
	config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);

	config.num_offload_reorder_bufs =
		__cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);

	config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);

	val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);

	config.gtk_offload_max_vdev =
		__cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);

	config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);

	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10x *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10x *)buf->data;

	memcpy(&cmd->resource_config, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
{
	struct wmi_init_cmd_10_2 *cmd;
	struct sk_buff *buf;
	struct wmi_resource_config_10x config = {};
	u32 len, val;

	config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
	config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
	config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
	config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
	config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
	config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
	config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
	config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
	config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
	config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);

	config.scan_max_pending_reqs =
		__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);

	config.bmiss_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_vdev =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);

	config.roam_offload_max_ap_profiles =
		__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);

	config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
	config.num_mcast_table_elems =
		__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);

	config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
	config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
	config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
	config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
	config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);

	val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
	config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);

	config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);

	config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
	config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);

	len = sizeof(*cmd) +
	      (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);

	buf = ath10k_wmi_alloc_skb(ar, len);
	if (!buf)
		return -ENOMEM;

	cmd = (struct wmi_init_cmd_10_2 *)buf->data;

	memcpy(&cmd->resource_config.common, &config, sizeof(config));
	ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
	return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
	int ret;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
			ret = ath10k_wmi_10_2_cmd_init(ar);
		else
			ret = ath10k_wmi_10x_cmd_init(ar);
	} else {
		ret = ath10k_wmi_main_cmd_init(ar);
	}

	return ret;
}
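/* Start-scan command construction: the arguments are validated, the
 * variable-length TLV section (channels, SSIDs, BSSIDs, IEs) is sized
 * and then serialized after the common scan parameters.
 */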
static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
	if (arg->ie_len && !arg->ie)
		return -EINVAL;
	if (arg->n_channels && !arg->channels)
		return -EINVAL;
	if (arg->n_ssids && !arg->ssids)
		return -EINVAL;
	if (arg->n_bssids && !arg->bssids)
		return -EINVAL;

	if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
		return -EINVAL;
	if (arg->n_channels > ARRAY_SIZE(arg->channels))
		return -EINVAL;
	if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
		return -EINVAL;
	if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
		return -EINVAL;

	return 0;
}
static size_t
ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
{
	int len = 0;

	if (arg->ie_len) {
		len += sizeof(struct wmi_ie_data);
		len += roundup(arg->ie_len, 4);
	}

	if (arg->n_channels) {
		len += sizeof(struct wmi_chan_list);
		len += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		len += sizeof(struct wmi_ssid_list);
		len += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		len += sizeof(struct wmi_bssid_list);
		len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	return len;
}
static void
ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
				 const struct wmi_start_scan_arg *arg)
{
	u32 scan_id;
	u32 scan_req_id;

	scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
	scan_id |= arg->scan_id;

	scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
	scan_req_id |= arg->scan_req_id;

	cmn->scan_id = __cpu_to_le32(scan_id);
	cmn->scan_req_id = __cpu_to_le32(scan_req_id);
	cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
	cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
	cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
	cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
	cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
	cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
	cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
	cmn->idle_time = __cpu_to_le32(arg->idle_time);
	cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
	cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
	cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
}
static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
			       const struct wmi_start_scan_arg *arg)
{
	struct wmi_ie_data *ie;
	struct wmi_chan_list *channels;
	struct wmi_ssid_list *ssids;
	struct wmi_bssid_list *bssids;
	void *ptr = tlvs->tlvs;
	int i;

	if (arg->n_channels) {
		channels = ptr;
		channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
		channels->num_chan = __cpu_to_le32(arg->n_channels);

		for (i = 0; i < arg->n_channels; i++)
			channels->channel_list[i].freq =
				__cpu_to_le16(arg->channels[i]);

		ptr += sizeof(*channels);
		ptr += sizeof(__le32) * arg->n_channels;
	}

	if (arg->n_ssids) {
		ssids = ptr;
		ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
		ssids->num_ssids = __cpu_to_le32(arg->n_ssids);

		for (i = 0; i < arg->n_ssids; i++) {
			ssids->ssids[i].ssid_len =
				__cpu_to_le32(arg->ssids[i].len);
			memcpy(&ssids->ssids[i].ssid,
			       arg->ssids[i].ssid,
			       arg->ssids[i].len);
		}

		ptr += sizeof(*ssids);
		ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
	}

	if (arg->n_bssids) {
		bssids = ptr;
		bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
		bssids->num_bssid = __cpu_to_le32(arg->n_bssids);

		for (i = 0; i < arg->n_bssids; i++)
			memcpy(&bssids->bssid_list[i],
			       arg->bssids[i].bssid,
			       ETH_ALEN);

		ptr += sizeof(*bssids);
		ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
	}

	if (arg->ie_len) {
		ie = ptr;
		ie->tag = __cpu_to_le32(WMI_IE_TAG);
		ie->ie_len = __cpu_to_le32(arg->ie_len);
		memcpy(ie->ie_data, arg->ie, arg->ie_len);

		ptr += sizeof(*ie);
		ptr += roundup(arg->ie_len, 4);
	}
}
int ath10k_wmi_start_scan(struct ath10k *ar,
			  const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;
	size_t len;
	int ret;

	ret = ath10k_wmi_start_scan_verify(arg);
	if (ret)
		return ret;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
		len = sizeof(struct wmi_10x_start_scan_cmd) +
		      ath10k_wmi_start_scan_tlvs_len(arg);
	else
		len = sizeof(struct wmi_start_scan_cmd) +
		      ath10k_wmi_start_scan_tlvs_len(arg);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		struct wmi_10x_start_scan_cmd *cmd;

		cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
		ath10k_wmi_put_start_scan_common(&cmd->common, arg);
		ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
	} else {
		struct wmi_start_scan_cmd *cmd;

		cmd = (struct wmi_start_scan_cmd *)skb->data;
		cmd->burst_duration_ms = __cpu_to_le32(0);

		ath10k_wmi_put_start_scan_common(&cmd->common, arg);
		ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

void ath10k_wmi_start_scan_init(struct ath10k *ar,
				struct wmi_start_scan_arg *arg)
{
	/* setup commonly used values */
	arg->scan_req_id = 1;
	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
	arg->dwell_time_active = 50;
	arg->dwell_time_passive = 150;
	arg->min_rest_time = 50;
	arg->max_rest_time = 500;
	arg->repeat_probe_time = 0;
	arg->probe_spacing_time = 0;
	arg->idle_time = 0;
	arg->max_scan_time = 20000;
	arg->probe_delay = 5;
	arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
		| WMI_SCAN_EVENT_COMPLETED
		| WMI_SCAN_EVENT_BSS_CHANNEL
		| WMI_SCAN_EVENT_FOREIGN_CHANNEL
		| WMI_SCAN_EVENT_DEQUEUED;
	arg->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
	arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
	arg->n_bssids = 1;
	arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
}

int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct wmi_stop_scan_cmd *cmd;
	struct sk_buff *skb;
	u32 scan_id;
	u32 req_id;

	if (arg->req_id > 0xFFF)
		return -EINVAL;
	if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	scan_id = arg->u.scan_id;
	scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;

	req_id = arg->req_id;
	req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;

	cmd = (struct wmi_stop_scan_cmd *)skb->data;
	cmd->req_type = __cpu_to_le32(arg->req_type);
	cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
	cmd->scan_id = __cpu_to_le32(scan_id);
	cmd->scan_req_id = __cpu_to_le32(req_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
		   arg->req_id, arg->req_type, arg->u.scan_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

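/*
 * Illustrative usage sketch (editorial addition, not from the original
 * source): stopping a single scan. The scan_id/req_id must match the values
 * used when the scan was started; "scan_id" here is an assumed local.
 *
 *	struct wmi_stop_scan_arg arg = {
 *		.req_id = 1,
 *		.req_type = WMI_SCAN_STOP_ONE,
 *		.u.scan_id = scan_id,
 *	};
 *	int ret;
 *
 *	ret = ath10k_wmi_stop_scan(ar, &arg);
 */
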
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
			   enum wmi_vdev_type type,
			   enum wmi_vdev_subtype subtype,
			   const u8 macaddr[ETH_ALEN])
{
	struct wmi_vdev_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_type = __cpu_to_le32(type);
	cmd->vdev_subtype = __cpu_to_le32(subtype);
	ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
		   vdev_id, type, subtype, macaddr);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

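/*
 * Illustrative usage sketch (editorial addition): creating a station vdev.
 * WMI_VDEV_TYPE_STA and WMI_VDEV_SUBTYPE_NONE are assumed enumerator names
 * from wmi.h; "vif" is the assumed mac80211 interface providing the MAC
 * address.
 *
 *	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id,
 *				     WMI_VDEV_TYPE_STA,
 *				     WMI_VDEV_SUBTYPE_NONE,
 *				     vif->addr);
 */
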
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "WMI vdev delete id %d\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static int
ath10k_wmi_vdev_start_restart(struct ath10k *ar,
			      const struct wmi_vdev_start_request_arg *arg,
			      u32 cmd_id)
{
	struct wmi_vdev_start_request_cmd *cmd;
	struct sk_buff *skb;
	const char *cmdname;
	u32 flags = 0;

	if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
	    cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
		return -EINVAL;
	if (WARN_ON(arg->ssid && arg->ssid_len == 0))
		return -EINVAL;
	if (WARN_ON(arg->hidden_ssid && !arg->ssid))
		return -EINVAL;
	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
		return -EINVAL;

	if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
		cmdname = "start";
	else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
		cmdname = "restart";
	else
		return -EINVAL; /* should not happen, we already check cmd_id */

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	if (arg->hidden_ssid)
		flags |= WMI_VDEV_START_HIDDEN_SSID;
	if (arg->pmf_enabled)
		flags |= WMI_VDEV_START_PMF_ENABLED;

	cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
	cmd->vdev_id         = __cpu_to_le32(arg->vdev_id);
	cmd->disable_hw_ack  = __cpu_to_le32(arg->disable_hw_ack);
	cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
	cmd->dtim_period     = __cpu_to_le32(arg->dtim_period);
	cmd->flags           = __cpu_to_le32(flags);
	cmd->bcn_tx_rate     = __cpu_to_le32(arg->bcn_tx_rate);
	cmd->bcn_tx_power    = __cpu_to_le32(arg->bcn_tx_power);

	if (arg->ssid) {
		cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
		memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
	}

	ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
		   cmdname, arg->vdev_id,
		   flags, arg->channel.freq, arg->channel.mode,
		   cmd->chan.flags, arg->channel.max_power);

	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

int ath10k_wmi_vdev_start(struct ath10k *ar,
			  const struct wmi_vdev_start_request_arg *arg)
{
	u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;

	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}

int ath10k_wmi_vdev_restart(struct ath10k *ar,
			    const struct wmi_vdev_start_request_arg *arg)
{
	u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;

	return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}

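/*
 * Illustrative usage sketch (editorial addition): the minimum a caller fills
 * in before ath10k_wmi_vdev_start(). The channel numbers and "chan_mode"
 * (a value of the firmware phy-mode enum) are assumed example values.
 *
 *	struct wmi_vdev_start_request_arg arg = {};
 *	int ret;
 *
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.bcn_intval = 100;
 *	arg.dtim_period = 1;
 *	arg.channel.freq = 2412;
 *	arg.channel.mode = chan_mode;
 *	ret = ath10k_wmi_vdev_start(ar, &arg);
 */
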
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_stop_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_stop_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}

int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct wmi_vdev_up_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_up_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->vdev_assoc_id = __cpu_to_le32(aid);
	ether_addr_copy(cmd->vdev_bssid.addr, bssid);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
		   vdev_id, aid, bssid);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

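/*
 * Editorial note: elsewhere in the driver a vdev is first started (which
 * programs the channel) and only brought up once the association state is
 * known, roughly:
 *
 *	ret = ath10k_wmi_vdev_start(ar, &arg);
 *	(wait for the firmware's vdev start response event)
 *	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, aid, bssid);
 *
 * The aid/bssid values come from the association with the peer; the exact
 * sequencing lives in mac.c, not here.
 */
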
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct wmi_vdev_down_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_down_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi mgmt vdev down id 0x%x\n", vdev_id);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
			      u32 param_id, u32 param_value)
{
	struct wmi_vdev_set_param_cmd *cmd;
	struct sk_buff *skb;

	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
		ath10k_dbg(ar, ATH10K_DBG_WMI,
			   "vdev param %d not supported by firmware\n",
			   param_id);
		return -EOPNOTSUPP;
	}

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev id 0x%x set param %d value %d\n",
		   vdev_id, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

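/*
 * Illustrative usage sketch (editorial addition): vdev parameter ids differ
 * between firmware branches, so callers look them up in the per-firmware map
 * installed by ath10k_wmi_attach() instead of hard-coding WMI_VDEV_PARAM_*
 * values. The dtim_period member below is an assumed example entry of that
 * map.
 *
 *	u32 param = ar->wmi.vdev_param->dtim_period;
 *	int ret;
 *
 *	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 2);
 */
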
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
				const struct wmi_vdev_install_key_arg *arg)
{
	struct wmi_vdev_install_key_cmd *cmd;
	struct sk_buff *skb;

	if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
		return -EINVAL;
	if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
	cmd->vdev_id       = __cpu_to_le32(arg->vdev_id);
	cmd->key_idx       = __cpu_to_le32(arg->key_idx);
	cmd->key_flags     = __cpu_to_le32(arg->key_flags);
	cmd->key_cipher    = __cpu_to_le32(arg->key_cipher);
	cmd->key_len       = __cpu_to_le32(arg->key_len);
	cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
	cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);

	if (arg->macaddr)
		ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
	if (arg->key_data)
		memcpy(cmd->key_data, arg->key_data, arg->key_len);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev install key idx %d cipher %d len %d\n",
		   arg->key_idx, arg->key_cipher, arg->key_len);
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

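/*
 * Illustrative usage sketch (editorial addition): installing a pairwise key.
 * WMI_CIPHER_AES_CCM is an assumed enumerator name, "key" is an assumed
 * mac80211 key structure and "peer_addr" the station the key belongs to.
 *
 *	struct wmi_vdev_install_key_arg arg = {
 *		.vdev_id = arvif->vdev_id,
 *		.key_idx = 0,
 *		.key_cipher = WMI_CIPHER_AES_CCM,
 *		.key_len = key->keylen,
 *		.key_data = key->key,
 *		.macaddr = peer_addr,
 *	};
 *	int ret;
 *
 *	ret = ath10k_wmi_vdev_install_key(ar, &arg);
 */
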
int ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
				  const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct wmi_vdev_spectral_conf_cmd *cmd;
	struct sk_buff *skb;
	u32 cmdid;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
	cmd->scan_count = __cpu_to_le32(arg->scan_count);
	cmd->scan_period = __cpu_to_le32(arg->scan_period);
	cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
	cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
	cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
	cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
	cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
	cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
	cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
	cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
	cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
	cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
	cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
	cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
	cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
	cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
	cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
	cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);

	cmdid = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmdid);
}

int ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				    u32 enable)
{
	struct wmi_vdev_spectral_enable_cmd *cmd;
	struct sk_buff *skb;
	u32 cmdid;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->trigger_cmd = __cpu_to_le32(trigger);
	cmd->enable_cmd = __cpu_to_le32(enable);

	cmdid = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmdid);
}

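/*
 * Illustrative usage sketch (editorial addition): spectral scan is a two-step
 * operation, configure first and then trigger/enable. The conf_arg contents
 * and the trigger/enable values of 1 are assumed example inputs.
 *
 *	ret = ath10k_wmi_vdev_spectral_conf(ar, &conf_arg);
 *	if (!ret)
 *		ret = ath10k_wmi_vdev_spectral_enable(ar, arvif->vdev_id, 1, 1);
 */
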
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_create_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_create_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi peer create vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
			   const u8 peer_addr[ETH_ALEN])
{
	struct wmi_peer_delete_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_delete_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi peer delete vdev_id %d peer_addr %pM\n",
		   vdev_id, peer_addr);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct wmi_peer_flush_tids_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
		   vdev_id, peer_addr, tid_bitmap);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
			      const u8 *peer_addr, enum wmi_peer_param param_id,
			      u32 param_value)
{
	struct wmi_peer_set_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_peer_set_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(param_value);
	ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi vdev %d peer 0x%pM set param %d value %d\n",
		   vdev_id, peer_addr, param_id, param_value);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
			  enum wmi_sta_ps_mode psmode)
{
	struct wmi_sta_powersave_mode_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->sta_ps_mode = __cpu_to_le32(psmode);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi set powersave id 0x%x mode %d\n",
		   vdev_id, psmode);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
				enum wmi_sta_powersave_param param_id,
				u32 value)
{
	struct wmi_sta_powersave_param_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi sta ps param vdev_id 0x%x param %d value %d\n",
		   vdev_id, param_id, value);
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			       enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct wmi_ap_ps_peer_cmd *cmd;
	struct sk_buff *skb;

	if (!mac)
		return -EINVAL;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(vdev_id);
	cmd->param_id = __cpu_to_le32(param_id);
	cmd->param_value = __cpu_to_le32(value);
	ether_addr_copy(cmd->peer_macaddr.addr, mac);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
		   vdev_id, param_id, value, mac);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

int ath10k_wmi_scan_chan_list(struct ath10k *ar,
			      const struct wmi_scan_chan_list_arg *arg)
{
	struct wmi_scan_chan_list_cmd *cmd;
	struct sk_buff *skb;
	struct wmi_channel_arg *ch;
	struct wmi_channel *ci;
	int len;
	int i;

	len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
	cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);

	for (i = 0; i < arg->n_channels; i++) {
		ch = &arg->channels[i];
		ci = &cmd->chan_info[i];

		ath10k_wmi_put_wmi_channel(ci, ch);
	}

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

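/*
 * Illustrative usage sketch (editorial addition): pushing a single channel to
 * the firmware; in practice the driver builds the array from the channels the
 * current regulatory domain allows. "chan_mode" is an assumed phy-mode value.
 *
 *	struct wmi_channel_arg ch = {
 *		.freq = 2412,
 *		.mode = chan_mode,
 *	};
 *	struct wmi_scan_chan_list_arg arg = {
 *		.n_channels = 1,
 *		.channels = &ch,
 *	};
 *	int ret;
 *
 *	ret = ath10k_wmi_scan_chan_list(ar, &arg);
 */
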
static void
ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
			   const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_common_peer_assoc_complete_cmd *cmd = buf;

	cmd->vdev_id            = __cpu_to_le32(arg->vdev_id);
	cmd->peer_new_assoc     = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
	cmd->peer_associd       = __cpu_to_le32(arg->peer_aid);
	cmd->peer_flags         = __cpu_to_le32(arg->peer_flags);
	cmd->peer_caps          = __cpu_to_le32(arg->peer_caps);
	cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
	cmd->peer_ht_caps       = __cpu_to_le32(arg->peer_ht_caps);
	cmd->peer_max_mpdu      = __cpu_to_le32(arg->peer_max_mpdu);
	cmd->peer_mpdu_density  = __cpu_to_le32(arg->peer_mpdu_density);
	cmd->peer_rate_caps     = __cpu_to_le32(arg->peer_rate_caps);
	cmd->peer_nss           = __cpu_to_le32(arg->peer_num_spatial_streams);
	cmd->peer_vht_caps      = __cpu_to_le32(arg->peer_vht_caps);
	cmd->peer_phymode       = __cpu_to_le32(arg->peer_phymode);

	ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);

	cmd->peer_legacy_rates.num_rates =
		__cpu_to_le32(arg->peer_legacy_rates.num_rates);
	memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
	       arg->peer_legacy_rates.num_rates);

	cmd->peer_ht_rates.num_rates =
		__cpu_to_le32(arg->peer_ht_rates.num_rates);
	memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
	       arg->peer_ht_rates.num_rates);

	cmd->peer_vht_rates.rx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
	cmd->peer_vht_rates.rx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
	cmd->peer_vht_rates.tx_max_rate =
		__cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
	cmd->peer_vht_rates.tx_mcs_set =
		__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
}

static void
ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_main_peer_assoc_complete_cmd *cmd = buf;

	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
	memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
}

static void
ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
}

static void
ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
				const struct wmi_peer_assoc_complete_arg *arg)
{
	struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
	int max_mcs, max_nss;
	u32 info0;

	/* TODO: Is using max values okay with firmware? */
	max_mcs = 0xf;
	max_nss = 0xf;

	info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
		SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);

	ath10k_wmi_peer_assoc_fill(ar, buf, arg);
	cmd->info0 = __cpu_to_le32(info0);
}

int ath10k_wmi_peer_assoc(struct ath10k *ar,
			  const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;
	int len;

	if (arg->peer_mpdu_density > 16)
		return -EINVAL;
	if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;
	if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
		return -EINVAL;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
			len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
		else
			len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
	} else {
		len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
	}

	skb = ath10k_wmi_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
			ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
		else
			ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
	} else {
		ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
	}

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi peer assoc vdev %d addr %pM (%s)\n",
		   arg->vdev_id, arg->addr,
		   arg->peer_reassoc ? "reassociate" : "new");
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

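/*
 * Illustrative usage sketch (editorial addition): the association argument is
 * normally derived from the mac80211 station entry; only a few representative
 * fields are shown and "sta" is an assumed struct ieee80211_sta pointer.
 *
 *	struct wmi_peer_assoc_complete_arg arg = {};
 *	int ret;
 *
 *	arg.vdev_id = arvif->vdev_id;
 *	arg.peer_aid = sta->aid;
 *	arg.peer_listen_intval = 1;
 *	arg.peer_num_spatial_streams = 1;
 *	ether_addr_copy(arg.addr, sta->addr);
 *	ret = ath10k_wmi_peer_assoc(ar, &arg);
 */
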
/* This function assumes the beacon is already DMA mapped */
int ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
{
	struct wmi_bcn_tx_ref_cmd *cmd;
	struct sk_buff *skb;
	struct sk_buff *beacon = arvif->beacon;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hdr *hdr;
	int ret;
	u16 fc;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	hdr = (struct ieee80211_hdr *)beacon->data;
	fc = le16_to_cpu(hdr->frame_control);

	cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
	cmd->vdev_id = __cpu_to_le32(arvif->vdev_id);
	cmd->data_len = __cpu_to_le32(beacon->len);
	cmd->data_ptr = __cpu_to_le32(ATH10K_SKB_CB(beacon)->paddr);
	cmd->frame_control = __cpu_to_le32(fc);
	cmd->flags = 0;
	cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);

	if (ATH10K_SKB_CB(beacon)->bcn.dtim_zero)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);

	if (ATH10K_SKB_CB(beacon)->bcn.deliver_cab)
		cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);

	if (ret)
		dev_kfree_skb(skb);

	return ret;
}

static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
					  const struct wmi_wmm_params_arg *arg)
{
	params->cwmin  = __cpu_to_le32(arg->cwmin);
	params->cwmax  = __cpu_to_le32(arg->cwmax);
	params->aifs   = __cpu_to_le32(arg->aifs);
	params->txop   = __cpu_to_le32(arg->txop);
	params->acm    = __cpu_to_le32(arg->acm);
	params->no_ack = __cpu_to_le32(arg->no_ack);
}

int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
				   const struct wmi_pdev_set_wmm_params_arg *arg)
{
	struct wmi_pdev_set_wmm_params *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_be, &arg->ac_be);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
	ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

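/*
 * Illustrative usage sketch (editorial addition): example best-effort access
 * class settings; the remaining classes (ac_bk, ac_vi, ac_vo) would be filled
 * the same way before the call. The numbers are example values only.
 *
 *	struct wmi_pdev_set_wmm_params_arg arg = {};
 *	int ret;
 *
 *	arg.ac_be.aifs = 3;
 *	arg.ac_be.cwmin = 15;
 *	arg.ac_be.cwmax = 1023;
 *	arg.ac_be.txop = 0;
 *	ret = ath10k_wmi_pdev_set_wmm_params(ar, &arg);
 */
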
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
{
	struct wmi_request_stats_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_request_stats_cmd *)skb->data;
	cmd->stats_id = __cpu_to_le32(stats_id);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

int ath10k_wmi_force_fw_hang(struct ath10k *ar,
			     enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct wmi_force_fw_hang_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
	cmd->type = __cpu_to_le32(type);
	cmd->delay_ms = __cpu_to_le32(delay_ms);

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
		   type, delay_ms);
	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
{
	struct wmi_dbglog_cfg_cmd *cmd;
	struct sk_buff *skb;
	u32 cfg;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;

	if (module_enable) {
		cfg = SM(ATH10K_DBGLOG_LEVEL_VERBOSE,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
	} else {
		/* set back defaults, all modules with WARN level */
		cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
			 ATH10K_DBGLOG_CFG_LOG_LVL);
		module_enable = ~0;
	}

	cmd->module_enable = __cpu_to_le32(module_enable);
	cmd->module_valid = __cpu_to_le32(~0);
	cmd->config_enable = __cpu_to_le32(cfg);
	cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);

	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
		   __le32_to_cpu(cmd->module_enable),
		   __le32_to_cpu(cmd->module_valid),
		   __le32_to_cpu(cmd->config_enable),
		   __le32_to_cpu(cmd->config_valid));

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

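/*
 * Editorial note: module_enable is a bitmap of firmware debug modules. A
 * non-zero bitmap switches the selected modules to verbose logging; passing 0
 * falls back to the WARN-level defaults handled above. "module_mask" below is
 * an assumed caller-chosen bitmap.
 *
 *	ret = ath10k_wmi_dbglog_cfg(ar, module_mask);
 */
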
int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
	struct wmi_pdev_pktlog_enable_cmd *cmd;
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	ev_bitmap &= ATH10K_PKTLOG_ANY;
	ath10k_dbg(ar, ATH10K_DBG_WMI,
		   "wmi enable pktlog filter:%x\n", ev_bitmap);

	cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
	cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	skb = ath10k_wmi_alloc_skb(ar, 0);
	if (!skb)
		return -ENOMEM;

	ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

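/*
 * Editorial note: packet log capture is enabled with a filter bitmap (clamped
 * to ATH10K_PKTLOG_ANY above) and torn down with the disable call, e.g.:
 *
 *	ret = ath10k_wmi_pdev_pktlog_enable(ar, ATH10K_PKTLOG_ANY);
 *	(collect packet log events)
 *	ret = ath10k_wmi_pdev_pktlog_disable(ar);
 */
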
int ath10k_wmi_attach(struct ath10k *ar)
{
	if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
		if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
			ar->wmi.cmd = &wmi_10_2_cmd_map;
		else
			ar->wmi.cmd = &wmi_10x_cmd_map;

		ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
		ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
	} else {
		ar->wmi.cmd = &wmi_cmd_map;
		ar->wmi.vdev_param = &wmi_vdev_param_map;
		ar->wmi.pdev_param = &wmi_pdev_param_map;
	}

	init_completion(&ar->wmi.service_ready);
	init_completion(&ar->wmi.unified_ready);

	return 0;
}

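/*
 * Editorial note: ath10k_wmi_attach() only selects the per-firmware command
 * and parameter maps and initialises the completion objects; it has to run
 * after ar->fw_features has been populated and before any of the command
 * helpers above are used. ath10k_wmi_detach() below releases the host memory
 * chunks handed to the firmware during init.
 */
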
void ath10k_wmi_detach(struct ath10k *ar)
{
	int i;

	/* free the host memory chunks requested by firmware */
	for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
		dma_free_coherent(ar->dev,
				  ar->wmi.mem_chunks[i].len,
				  ar->wmi.mem_chunks[i].vaddr,
				  ar->wmi.mem_chunks[i].paddr);
	}

	ar->wmi.num_mem_chunks = 0;
}