/* SPDX-License-Identifier: ISC */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
	void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
	int (*pull_mgmt_tx_bundle_compl)(
				struct ath10k *ar, struct sk_buff *skb,
				struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_echo_ev_arg *arg);
	int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
				  struct wmi_dfs_status_ev_arg *arg);
	int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_svc_avail_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
						     const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
						 u32 prob_req_oui);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
						       u32 vdev_id,
						       enum wmi_peer_stats_info_request_type type,
						       u8 *addr,
						       u32 reset);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
					    struct sk_buff *skb,
					    dma_addr_t paddr);
	int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset, u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_radar_found)(struct ath10k *ar,
					   const struct ath10k_radar_found_info *arg);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
	struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
					       enum wmi_host_platform_type type,
					       u32 fw_feature_bitmap);
	int (*get_vdev_subtype)(struct ath10k *ar,
				enum wmi_vdev_subtype subtype);
	struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
					      u32 vdev_id,
					      struct wmi_pno_scan_req *pno_scan);
	struct sk_buff *(*gen_pdev_bss_chan_info_req)(struct ath10k *ar,
						      enum wmi_bss_survey_req_type type);
	struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
	struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
							u32 param);
	struct sk_buff *(*gen_bb_timing)(struct ath10k *ar,
					 const struct wmi_bb_timing_cfg_arg *arg);
	struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
						    const struct wmi_per_peer_per_tid_cfg_arg *arg);
};
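/*
 * Usage sketch (illustrative only, not part of the original header): each
 * WMI backend is expected to fill in one instance of this ops table and
 * hook it up to ar->wmi.ops when it attaches, roughly like:
 *
 *	static const struct wmi_ops wmi_example_ops = {
 *		.rx = ath10k_wmi_example_op_rx,
 *		.pull_scan = ath10k_wmi_example_op_pull_scan,
 *		.gen_init = ath10k_wmi_example_op_gen_init,
 *	};
 *
 *	ar->wmi.ops = &wmi_example_ops;
 *
 * The *_example_op_* names above are hypothetical placeholders; the real
 * backends provide their own handlers and may leave unsupported ops NULL,
 * which the wrappers below report as "not supported".
 */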
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
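/*
 * The command wrappers below all follow the same pattern, sketched here
 * for reference (gen_foo/foo_cmdid are placeholders, not real members):
 *
 *	skb = ar->wmi.ops->gen_foo(ar, ...);	// backend builds the buffer
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);		// buffer could not be built
 *	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->foo_cmdid);
 *
 * i.e. the op only constructs the command payload; ath10k_wmi_cmd_send()
 * tags it with the backend-specific command id and queues it to firmware.
 */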
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
		       size_t len)
{
	if (!ar->wmi.ops->map_svc_ext)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc_ext(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
				     struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}
static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_svc_avail_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_avail)
		return -EOPNOTSUPP;
	return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_echo_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_echo_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_dfs_status_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_dfs_status_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
}
static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
{
	if (!ar->wmi.ops->cleanup_mgmt_tx_send)
		return -EOPNOTSUPP;

	return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
}

static inline int
ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
			dma_addr_t paddr)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_send_cmdid);
	if (ret)
		return ret;

	return 0;
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb,
				  ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either.
	 */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}
static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
static inline int
ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
{
	struct sk_buff *skb;
	u32 prob_req_oui;

	prob_req_oui = (((u32)mac_addr[0]) << 16) |
		       (((u32)mac_addr[1]) << 8) | mac_addr[2];

	if (!ar->wmi.ops->gen_scan_prob_req_oui)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->scan_prob_req_oui_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}

static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}
static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
static inline int
ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
				   u32 vdev_id,
				   enum wmi_peer_stats_info_request_type type,
				   u8 *addr,
				   u32 reset)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_peer_stats_info)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
						       vdev_id,
						       type,
						       addr,
						       reset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
}
static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}
static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}

static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
			  struct wmi_pno_scan_req *pno_scan)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_config_pno)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}
static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}
static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}

static inline int
ath10k_wmi_ext_resource_config(struct ath10k *ar,
			       enum wmi_host_platform_type type,
			       u32 fw_feature_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->ext_resource_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->ext_resource_config(ar, type,
					       fw_feature_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ext_resource_cfg_cmdid);
}

static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
	if (!ar->wmi.ops->get_vdev_subtype)
		return -EOPNOTSUPP;

	return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}
static inline int
ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
				      enum wmi_bss_survey_req_type type)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_pdev_bss_chan_info_req)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   wmi->cmd->pdev_bss_chan_info_request_cmdid);
}

static inline int
ath10k_wmi_echo(struct ath10k *ar, u32 value)
{
	struct ath10k_wmi *wmi = &ar->wmi;
	struct sk_buff *skb;

	if (!wmi->ops->gen_echo)
		return -EOPNOTSUPP;

	skb = wmi->ops->gen_echo(ar, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_table_cmdid);
}
static inline int
ath10k_wmi_report_radar_found(struct ath10k *ar,
			      const struct ath10k_radar_found_info *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_radar_found)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_radar_found(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->radar_found_cmdid);
}

static inline int
ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
			  const struct wmi_bb_timing_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bb_timing)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bb_timing(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->set_bb_timing_cmdid);
}

static inline int
ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
				    const struct wmi_per_peer_per_tid_cfg_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->per_peer_per_tid_config_cmdid);
}