 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 void (*rx
)(struct ath10k
*ar
, struct sk_buff
*skb
);
26 void (*map_svc
)(const __le32
*in
, unsigned long *out
, size_t len
);
28 int (*pull_scan
)(struct ath10k
*ar
, struct sk_buff
*skb
,
29 struct wmi_scan_ev_arg
*arg
);
30 int (*pull_mgmt_rx
)(struct ath10k
*ar
, struct sk_buff
*skb
,
31 struct wmi_mgmt_rx_ev_arg
*arg
);
32 int (*pull_ch_info
)(struct ath10k
*ar
, struct sk_buff
*skb
,
33 struct wmi_ch_info_ev_arg
*arg
);
34 int (*pull_vdev_start
)(struct ath10k
*ar
, struct sk_buff
*skb
,
35 struct wmi_vdev_start_ev_arg
*arg
);
36 int (*pull_peer_kick
)(struct ath10k
*ar
, struct sk_buff
*skb
,
37 struct wmi_peer_kick_ev_arg
*arg
);
38 int (*pull_swba
)(struct ath10k
*ar
, struct sk_buff
*skb
,
39 struct wmi_swba_ev_arg
*arg
);
40 int (*pull_phyerr_hdr
)(struct ath10k
*ar
, struct sk_buff
*skb
,
41 struct wmi_phyerr_hdr_arg
*arg
);
42 int (*pull_phyerr
)(struct ath10k
*ar
, const void *phyerr_buf
,
43 int left_len
, struct wmi_phyerr_ev_arg
*arg
);
44 int (*pull_svc_rdy
)(struct ath10k
*ar
, struct sk_buff
*skb
,
45 struct wmi_svc_rdy_ev_arg
*arg
);
46 int (*pull_rdy
)(struct ath10k
*ar
, struct sk_buff
*skb
,
47 struct wmi_rdy_ev_arg
*arg
);
48 int (*pull_fw_stats
)(struct ath10k
*ar
, struct sk_buff
*skb
,
49 struct ath10k_fw_stats
*stats
);
50 int (*pull_roam_ev
)(struct ath10k
*ar
, struct sk_buff
*skb
,
51 struct wmi_roam_ev_arg
*arg
);
52 int (*pull_wow_event
)(struct ath10k
*ar
, struct sk_buff
*skb
,
53 struct wmi_wow_ev_arg
*arg
);
54 enum wmi_txbf_conf (*get_txbf_conf_scheme
)(struct ath10k
*ar
);
56 struct sk_buff
*(*gen_pdev_suspend
)(struct ath10k
*ar
, u32 suspend_opt
);
57 struct sk_buff
*(*gen_pdev_resume
)(struct ath10k
*ar
);
58 struct sk_buff
*(*gen_pdev_set_rd
)(struct ath10k
*ar
, u16 rd
, u16 rd2g
,
59 u16 rd5g
, u16 ctl2g
, u16 ctl5g
,
60 enum wmi_dfs_region dfs_reg
);
61 struct sk_buff
*(*gen_pdev_set_param
)(struct ath10k
*ar
, u32 id
,
63 struct sk_buff
*(*gen_init
)(struct ath10k
*ar
);
64 struct sk_buff
*(*gen_start_scan
)(struct ath10k
*ar
,
65 const struct wmi_start_scan_arg
*arg
);
66 struct sk_buff
*(*gen_stop_scan
)(struct ath10k
*ar
,
67 const struct wmi_stop_scan_arg
*arg
);
68 struct sk_buff
*(*gen_vdev_create
)(struct ath10k
*ar
, u32 vdev_id
,
69 enum wmi_vdev_type type
,
70 enum wmi_vdev_subtype subtype
,
71 const u8 macaddr
[ETH_ALEN
]);
72 struct sk_buff
*(*gen_vdev_delete
)(struct ath10k
*ar
, u32 vdev_id
);
73 struct sk_buff
*(*gen_vdev_start
)(struct ath10k
*ar
,
74 const struct wmi_vdev_start_request_arg
*arg
,
76 struct sk_buff
*(*gen_vdev_stop
)(struct ath10k
*ar
, u32 vdev_id
);
77 struct sk_buff
*(*gen_vdev_up
)(struct ath10k
*ar
, u32 vdev_id
, u32 aid
,
79 struct sk_buff
*(*gen_vdev_down
)(struct ath10k
*ar
, u32 vdev_id
);
80 struct sk_buff
*(*gen_vdev_set_param
)(struct ath10k
*ar
, u32 vdev_id
,
81 u32 param_id
, u32 param_value
);
82 struct sk_buff
*(*gen_vdev_install_key
)(struct ath10k
*ar
,
83 const struct wmi_vdev_install_key_arg
*arg
);
84 struct sk_buff
*(*gen_vdev_spectral_conf
)(struct ath10k
*ar
,
85 const struct wmi_vdev_spectral_conf_arg
*arg
);
86 struct sk_buff
*(*gen_vdev_spectral_enable
)(struct ath10k
*ar
, u32 vdev_id
,
87 u32 trigger
, u32 enable
);
88 struct sk_buff
*(*gen_vdev_wmm_conf
)(struct ath10k
*ar
, u32 vdev_id
,
89 const struct wmi_wmm_params_all_arg
*arg
);
90 struct sk_buff
*(*gen_peer_create
)(struct ath10k
*ar
, u32 vdev_id
,
91 const u8 peer_addr
[ETH_ALEN
],
92 enum wmi_peer_type peer_type
);
93 struct sk_buff
*(*gen_peer_delete
)(struct ath10k
*ar
, u32 vdev_id
,
94 const u8 peer_addr
[ETH_ALEN
]);
95 struct sk_buff
*(*gen_peer_flush
)(struct ath10k
*ar
, u32 vdev_id
,
96 const u8 peer_addr
[ETH_ALEN
],
98 struct sk_buff
*(*gen_peer_set_param
)(struct ath10k
*ar
, u32 vdev_id
,
100 enum wmi_peer_param param_id
,
102 struct sk_buff
*(*gen_peer_assoc
)(struct ath10k
*ar
,
103 const struct wmi_peer_assoc_complete_arg
*arg
);
104 struct sk_buff
*(*gen_set_psmode
)(struct ath10k
*ar
, u32 vdev_id
,
105 enum wmi_sta_ps_mode psmode
);
106 struct sk_buff
*(*gen_set_sta_ps
)(struct ath10k
*ar
, u32 vdev_id
,
107 enum wmi_sta_powersave_param param_id
,
109 struct sk_buff
*(*gen_set_ap_ps
)(struct ath10k
*ar
, u32 vdev_id
,
111 enum wmi_ap_ps_peer_param param_id
,
113 struct sk_buff
*(*gen_scan_chan_list
)(struct ath10k
*ar
,
114 const struct wmi_scan_chan_list_arg
*arg
);
115 struct sk_buff
*(*gen_beacon_dma
)(struct ath10k
*ar
, u32 vdev_id
,
116 const void *bcn
, size_t bcn_len
,
117 u32 bcn_paddr
, bool dtim_zero
,
119 struct sk_buff
*(*gen_pdev_set_wmm
)(struct ath10k
*ar
,
120 const struct wmi_wmm_params_all_arg
*arg
);
121 struct sk_buff
*(*gen_request_stats
)(struct ath10k
*ar
, u32 stats_mask
);
122 struct sk_buff
*(*gen_force_fw_hang
)(struct ath10k
*ar
,
123 enum wmi_force_fw_hang_type type
,
125 struct sk_buff
*(*gen_mgmt_tx
)(struct ath10k
*ar
, struct sk_buff
*skb
);
126 struct sk_buff
*(*gen_dbglog_cfg
)(struct ath10k
*ar
, u32 module_enable
,
128 struct sk_buff
*(*gen_pktlog_enable
)(struct ath10k
*ar
, u32 filter
);
129 struct sk_buff
*(*gen_pktlog_disable
)(struct ath10k
*ar
);
130 struct sk_buff
*(*gen_pdev_set_quiet_mode
)(struct ath10k
*ar
,
131 u32 period
, u32 duration
,
134 struct sk_buff
*(*gen_pdev_get_temperature
)(struct ath10k
*ar
);
135 struct sk_buff
*(*gen_addba_clear_resp
)(struct ath10k
*ar
, u32 vdev_id
,
137 struct sk_buff
*(*gen_addba_send
)(struct ath10k
*ar
, u32 vdev_id
,
138 const u8
*mac
, u32 tid
, u32 buf_size
);
139 struct sk_buff
*(*gen_addba_set_resp
)(struct ath10k
*ar
, u32 vdev_id
,
140 const u8
*mac
, u32 tid
,
142 struct sk_buff
*(*gen_delba_send
)(struct ath10k
*ar
, u32 vdev_id
,
143 const u8
*mac
, u32 tid
, u32 initiator
,
145 struct sk_buff
*(*gen_bcn_tmpl
)(struct ath10k
*ar
, u32 vdev_id
,
146 u32 tim_ie_offset
, struct sk_buff
*bcn
,
147 u32 prb_caps
, u32 prb_erp
,
148 void *prb_ies
, size_t prb_ies_len
);
149 struct sk_buff
*(*gen_prb_tmpl
)(struct ath10k
*ar
, u32 vdev_id
,
150 struct sk_buff
*bcn
);
151 struct sk_buff
*(*gen_p2p_go_bcn_ie
)(struct ath10k
*ar
, u32 vdev_id
,
153 struct sk_buff
*(*gen_vdev_sta_uapsd
)(struct ath10k
*ar
, u32 vdev_id
,
154 const u8 peer_addr
[ETH_ALEN
],
155 const struct wmi_sta_uapsd_auto_trig_arg
*args
,
157 struct sk_buff
*(*gen_sta_keepalive
)(struct ath10k
*ar
,
158 const struct wmi_sta_keepalive_arg
*arg
);
159 struct sk_buff
*(*gen_wow_enable
)(struct ath10k
*ar
);
160 struct sk_buff
*(*gen_wow_add_wakeup_event
)(struct ath10k
*ar
, u32 vdev_id
,
161 enum wmi_wow_wakeup_event event
,
163 struct sk_buff
*(*gen_wow_host_wakeup_ind
)(struct ath10k
*ar
);
164 struct sk_buff
*(*gen_wow_add_pattern
)(struct ath10k
*ar
, u32 vdev_id
,
170 struct sk_buff
*(*gen_wow_del_pattern
)(struct ath10k
*ar
, u32 vdev_id
,
172 struct sk_buff
*(*gen_update_fw_tdls_state
)(struct ath10k
*ar
,
174 enum wmi_tdls_state state
);
175 struct sk_buff
*(*gen_tdls_peer_update
)(struct ath10k
*ar
,
176 const struct wmi_tdls_peer_update_cmd_arg
*arg
,
177 const struct wmi_tdls_peer_capab_arg
*cap
,
178 const struct wmi_channel_arg
*chan
);
179 struct sk_buff
*(*gen_adaptive_qcs
)(struct ath10k
*ar
, bool enable
);
180 struct sk_buff
*(*gen_pdev_get_tpc_config
)(struct ath10k
*ar
,
182 void (*fw_stats_fill
)(struct ath10k
*ar
,
183 struct ath10k_fw_stats
*fw_stats
,
185 struct sk_buff
*(*gen_pdev_enable_adaptive_cca
)(struct ath10k
*ar
,
191 int ath10k_wmi_cmd_send(struct ath10k
*ar
, struct sk_buff
*skb
, u32 cmd_id
);
194 ath10k_wmi_rx(struct ath10k
*ar
, struct sk_buff
*skb
)
196 if (WARN_ON_ONCE(!ar
->wmi
.ops
->rx
))
199 ar
->wmi
.ops
->rx(ar
, skb
);
204 ath10k_wmi_map_svc(struct ath10k
*ar
, const __le32
*in
, unsigned long *out
,
207 if (!ar
->wmi
.ops
->map_svc
)
210 ar
->wmi
.ops
->map_svc(in
, out
, len
);
215 ath10k_wmi_pull_scan(struct ath10k
*ar
, struct sk_buff
*skb
,
216 struct wmi_scan_ev_arg
*arg
)
218 if (!ar
->wmi
.ops
->pull_scan
)
221 return ar
->wmi
.ops
->pull_scan(ar
, skb
, arg
);
225 ath10k_wmi_pull_mgmt_rx(struct ath10k
*ar
, struct sk_buff
*skb
,
226 struct wmi_mgmt_rx_ev_arg
*arg
)
228 if (!ar
->wmi
.ops
->pull_mgmt_rx
)
231 return ar
->wmi
.ops
->pull_mgmt_rx(ar
, skb
, arg
);
235 ath10k_wmi_pull_ch_info(struct ath10k
*ar
, struct sk_buff
*skb
,
236 struct wmi_ch_info_ev_arg
*arg
)
238 if (!ar
->wmi
.ops
->pull_ch_info
)
241 return ar
->wmi
.ops
->pull_ch_info(ar
, skb
, arg
);
245 ath10k_wmi_pull_vdev_start(struct ath10k
*ar
, struct sk_buff
*skb
,
246 struct wmi_vdev_start_ev_arg
*arg
)
248 if (!ar
->wmi
.ops
->pull_vdev_start
)
251 return ar
->wmi
.ops
->pull_vdev_start(ar
, skb
, arg
);
255 ath10k_wmi_pull_peer_kick(struct ath10k
*ar
, struct sk_buff
*skb
,
256 struct wmi_peer_kick_ev_arg
*arg
)
258 if (!ar
->wmi
.ops
->pull_peer_kick
)
261 return ar
->wmi
.ops
->pull_peer_kick(ar
, skb
, arg
);
265 ath10k_wmi_pull_swba(struct ath10k
*ar
, struct sk_buff
*skb
,
266 struct wmi_swba_ev_arg
*arg
)
268 if (!ar
->wmi
.ops
->pull_swba
)
271 return ar
->wmi
.ops
->pull_swba(ar
, skb
, arg
);
275 ath10k_wmi_pull_phyerr_hdr(struct ath10k
*ar
, struct sk_buff
*skb
,
276 struct wmi_phyerr_hdr_arg
*arg
)
278 if (!ar
->wmi
.ops
->pull_phyerr_hdr
)
281 return ar
->wmi
.ops
->pull_phyerr_hdr(ar
, skb
, arg
);
285 ath10k_wmi_pull_phyerr(struct ath10k
*ar
, const void *phyerr_buf
,
286 int left_len
, struct wmi_phyerr_ev_arg
*arg
)
288 if (!ar
->wmi
.ops
->pull_phyerr
)
291 return ar
->wmi
.ops
->pull_phyerr(ar
, phyerr_buf
, left_len
, arg
);
295 ath10k_wmi_pull_svc_rdy(struct ath10k
*ar
, struct sk_buff
*skb
,
296 struct wmi_svc_rdy_ev_arg
*arg
)
298 if (!ar
->wmi
.ops
->pull_svc_rdy
)
301 return ar
->wmi
.ops
->pull_svc_rdy(ar
, skb
, arg
);
305 ath10k_wmi_pull_rdy(struct ath10k
*ar
, struct sk_buff
*skb
,
306 struct wmi_rdy_ev_arg
*arg
)
308 if (!ar
->wmi
.ops
->pull_rdy
)
311 return ar
->wmi
.ops
->pull_rdy(ar
, skb
, arg
);
315 ath10k_wmi_pull_fw_stats(struct ath10k
*ar
, struct sk_buff
*skb
,
316 struct ath10k_fw_stats
*stats
)
318 if (!ar
->wmi
.ops
->pull_fw_stats
)
321 return ar
->wmi
.ops
->pull_fw_stats(ar
, skb
, stats
);
325 ath10k_wmi_pull_roam_ev(struct ath10k
*ar
, struct sk_buff
*skb
,
326 struct wmi_roam_ev_arg
*arg
)
328 if (!ar
->wmi
.ops
->pull_roam_ev
)
331 return ar
->wmi
.ops
->pull_roam_ev(ar
, skb
, arg
);
335 ath10k_wmi_pull_wow_event(struct ath10k
*ar
, struct sk_buff
*skb
,
336 struct wmi_wow_ev_arg
*arg
)
338 if (!ar
->wmi
.ops
->pull_wow_event
)
341 return ar
->wmi
.ops
->pull_wow_event(ar
, skb
, arg
);
344 static inline enum wmi_txbf_conf
345 ath10k_wmi_get_txbf_conf_scheme(struct ath10k
*ar
)
347 if (!ar
->wmi
.ops
->get_txbf_conf_scheme
)
348 return WMI_TXBF_CONF_UNSUPPORTED
;
350 return ar
->wmi
.ops
->get_txbf_conf_scheme(ar
);
354 ath10k_wmi_mgmt_tx(struct ath10k
*ar
, struct sk_buff
*msdu
)
356 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(msdu
);
360 if (!ar
->wmi
.ops
->gen_mgmt_tx
)
363 skb
= ar
->wmi
.ops
->gen_mgmt_tx(ar
, msdu
);
367 ret
= ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->mgmt_tx_cmdid
);
371 /* FIXME There's no ACK event for Management Tx. This probably
372 * shouldn't be called here either. */
373 info
->flags
|= IEEE80211_TX_STAT_ACK
;
374 ieee80211_tx_status_irqsafe(ar
->hw
, msdu
);
380 ath10k_wmi_pdev_set_regdomain(struct ath10k
*ar
, u16 rd
, u16 rd2g
, u16 rd5g
,
381 u16 ctl2g
, u16 ctl5g
,
382 enum wmi_dfs_region dfs_reg
)
386 if (!ar
->wmi
.ops
->gen_pdev_set_rd
)
389 skb
= ar
->wmi
.ops
->gen_pdev_set_rd(ar
, rd
, rd2g
, rd5g
, ctl2g
, ctl5g
,
394 return ath10k_wmi_cmd_send(ar
, skb
,
395 ar
->wmi
.cmd
->pdev_set_regdomain_cmdid
);
399 ath10k_wmi_pdev_suspend_target(struct ath10k
*ar
, u32 suspend_opt
)
403 if (!ar
->wmi
.ops
->gen_pdev_suspend
)
406 skb
= ar
->wmi
.ops
->gen_pdev_suspend(ar
, suspend_opt
);
410 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->pdev_suspend_cmdid
);
414 ath10k_wmi_pdev_resume_target(struct ath10k
*ar
)
418 if (!ar
->wmi
.ops
->gen_pdev_resume
)
421 skb
= ar
->wmi
.ops
->gen_pdev_resume(ar
);
425 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->pdev_resume_cmdid
);
429 ath10k_wmi_pdev_set_param(struct ath10k
*ar
, u32 id
, u32 value
)
433 if (!ar
->wmi
.ops
->gen_pdev_set_param
)
436 skb
= ar
->wmi
.ops
->gen_pdev_set_param(ar
, id
, value
);
440 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->pdev_set_param_cmdid
);
444 ath10k_wmi_cmd_init(struct ath10k
*ar
)
448 if (!ar
->wmi
.ops
->gen_init
)
451 skb
= ar
->wmi
.ops
->gen_init(ar
);
455 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->init_cmdid
);
459 ath10k_wmi_start_scan(struct ath10k
*ar
,
460 const struct wmi_start_scan_arg
*arg
)
464 if (!ar
->wmi
.ops
->gen_start_scan
)
467 skb
= ar
->wmi
.ops
->gen_start_scan(ar
, arg
);
471 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->start_scan_cmdid
);
475 ath10k_wmi_stop_scan(struct ath10k
*ar
, const struct wmi_stop_scan_arg
*arg
)
479 if (!ar
->wmi
.ops
->gen_stop_scan
)
482 skb
= ar
->wmi
.ops
->gen_stop_scan(ar
, arg
);
486 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->stop_scan_cmdid
);
490 ath10k_wmi_vdev_create(struct ath10k
*ar
, u32 vdev_id
,
491 enum wmi_vdev_type type
,
492 enum wmi_vdev_subtype subtype
,
493 const u8 macaddr
[ETH_ALEN
])
497 if (!ar
->wmi
.ops
->gen_vdev_create
)
500 skb
= ar
->wmi
.ops
->gen_vdev_create(ar
, vdev_id
, type
, subtype
, macaddr
);
504 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_create_cmdid
);
508 ath10k_wmi_vdev_delete(struct ath10k
*ar
, u32 vdev_id
)
512 if (!ar
->wmi
.ops
->gen_vdev_delete
)
515 skb
= ar
->wmi
.ops
->gen_vdev_delete(ar
, vdev_id
);
519 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_delete_cmdid
);
523 ath10k_wmi_vdev_start(struct ath10k
*ar
,
524 const struct wmi_vdev_start_request_arg
*arg
)
528 if (!ar
->wmi
.ops
->gen_vdev_start
)
531 skb
= ar
->wmi
.ops
->gen_vdev_start(ar
, arg
, false);
535 return ath10k_wmi_cmd_send(ar
, skb
,
536 ar
->wmi
.cmd
->vdev_start_request_cmdid
);
540 ath10k_wmi_vdev_restart(struct ath10k
*ar
,
541 const struct wmi_vdev_start_request_arg
*arg
)
545 if (!ar
->wmi
.ops
->gen_vdev_start
)
548 skb
= ar
->wmi
.ops
->gen_vdev_start(ar
, arg
, true);
552 return ath10k_wmi_cmd_send(ar
, skb
,
553 ar
->wmi
.cmd
->vdev_restart_request_cmdid
);
557 ath10k_wmi_vdev_stop(struct ath10k
*ar
, u32 vdev_id
)
561 if (!ar
->wmi
.ops
->gen_vdev_stop
)
564 skb
= ar
->wmi
.ops
->gen_vdev_stop(ar
, vdev_id
);
568 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_stop_cmdid
);
572 ath10k_wmi_vdev_up(struct ath10k
*ar
, u32 vdev_id
, u32 aid
, const u8
*bssid
)
576 if (!ar
->wmi
.ops
->gen_vdev_up
)
579 skb
= ar
->wmi
.ops
->gen_vdev_up(ar
, vdev_id
, aid
, bssid
);
583 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_up_cmdid
);
587 ath10k_wmi_vdev_down(struct ath10k
*ar
, u32 vdev_id
)
591 if (!ar
->wmi
.ops
->gen_vdev_down
)
594 skb
= ar
->wmi
.ops
->gen_vdev_down(ar
, vdev_id
);
598 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_down_cmdid
);
602 ath10k_wmi_vdev_set_param(struct ath10k
*ar
, u32 vdev_id
, u32 param_id
,
607 if (!ar
->wmi
.ops
->gen_vdev_set_param
)
610 skb
= ar
->wmi
.ops
->gen_vdev_set_param(ar
, vdev_id
, param_id
,
615 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->vdev_set_param_cmdid
);
619 ath10k_wmi_vdev_install_key(struct ath10k
*ar
,
620 const struct wmi_vdev_install_key_arg
*arg
)
624 if (!ar
->wmi
.ops
->gen_vdev_install_key
)
627 skb
= ar
->wmi
.ops
->gen_vdev_install_key(ar
, arg
);
631 return ath10k_wmi_cmd_send(ar
, skb
,
632 ar
->wmi
.cmd
->vdev_install_key_cmdid
);
636 ath10k_wmi_vdev_spectral_conf(struct ath10k
*ar
,
637 const struct wmi_vdev_spectral_conf_arg
*arg
)
642 if (!ar
->wmi
.ops
->gen_vdev_spectral_conf
)
645 skb
= ar
->wmi
.ops
->gen_vdev_spectral_conf(ar
, arg
);
649 cmd_id
= ar
->wmi
.cmd
->vdev_spectral_scan_configure_cmdid
;
650 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
654 ath10k_wmi_vdev_spectral_enable(struct ath10k
*ar
, u32 vdev_id
, u32 trigger
,
660 if (!ar
->wmi
.ops
->gen_vdev_spectral_enable
)
663 skb
= ar
->wmi
.ops
->gen_vdev_spectral_enable(ar
, vdev_id
, trigger
,
668 cmd_id
= ar
->wmi
.cmd
->vdev_spectral_scan_enable_cmdid
;
669 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
673 ath10k_wmi_vdev_sta_uapsd(struct ath10k
*ar
, u32 vdev_id
,
674 const u8 peer_addr
[ETH_ALEN
],
675 const struct wmi_sta_uapsd_auto_trig_arg
*args
,
681 if (!ar
->wmi
.ops
->gen_vdev_sta_uapsd
)
684 skb
= ar
->wmi
.ops
->gen_vdev_sta_uapsd(ar
, vdev_id
, peer_addr
, args
,
689 cmd_id
= ar
->wmi
.cmd
->sta_uapsd_auto_trig_cmdid
;
690 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
694 ath10k_wmi_vdev_wmm_conf(struct ath10k
*ar
, u32 vdev_id
,
695 const struct wmi_wmm_params_all_arg
*arg
)
700 skb
= ar
->wmi
.ops
->gen_vdev_wmm_conf(ar
, vdev_id
, arg
);
704 cmd_id
= ar
->wmi
.cmd
->vdev_set_wmm_params_cmdid
;
705 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
709 ath10k_wmi_peer_create(struct ath10k
*ar
, u32 vdev_id
,
710 const u8 peer_addr
[ETH_ALEN
],
711 enum wmi_peer_type peer_type
)
715 if (!ar
->wmi
.ops
->gen_peer_create
)
718 skb
= ar
->wmi
.ops
->gen_peer_create(ar
, vdev_id
, peer_addr
, peer_type
);
722 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->peer_create_cmdid
);
726 ath10k_wmi_peer_delete(struct ath10k
*ar
, u32 vdev_id
,
727 const u8 peer_addr
[ETH_ALEN
])
731 if (!ar
->wmi
.ops
->gen_peer_delete
)
734 skb
= ar
->wmi
.ops
->gen_peer_delete(ar
, vdev_id
, peer_addr
);
738 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->peer_delete_cmdid
);
742 ath10k_wmi_peer_flush(struct ath10k
*ar
, u32 vdev_id
,
743 const u8 peer_addr
[ETH_ALEN
], u32 tid_bitmap
)
747 if (!ar
->wmi
.ops
->gen_peer_flush
)
750 skb
= ar
->wmi
.ops
->gen_peer_flush(ar
, vdev_id
, peer_addr
, tid_bitmap
);
754 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->peer_flush_tids_cmdid
);
758 ath10k_wmi_peer_set_param(struct ath10k
*ar
, u32 vdev_id
, const u8
*peer_addr
,
759 enum wmi_peer_param param_id
, u32 param_value
)
763 if (!ar
->wmi
.ops
->gen_peer_set_param
)
766 skb
= ar
->wmi
.ops
->gen_peer_set_param(ar
, vdev_id
, peer_addr
, param_id
,
771 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->peer_set_param_cmdid
);
775 ath10k_wmi_set_psmode(struct ath10k
*ar
, u32 vdev_id
,
776 enum wmi_sta_ps_mode psmode
)
780 if (!ar
->wmi
.ops
->gen_set_psmode
)
783 skb
= ar
->wmi
.ops
->gen_set_psmode(ar
, vdev_id
, psmode
);
787 return ath10k_wmi_cmd_send(ar
, skb
,
788 ar
->wmi
.cmd
->sta_powersave_mode_cmdid
);
792 ath10k_wmi_set_sta_ps_param(struct ath10k
*ar
, u32 vdev_id
,
793 enum wmi_sta_powersave_param param_id
, u32 value
)
797 if (!ar
->wmi
.ops
->gen_set_sta_ps
)
800 skb
= ar
->wmi
.ops
->gen_set_sta_ps(ar
, vdev_id
, param_id
, value
);
804 return ath10k_wmi_cmd_send(ar
, skb
,
805 ar
->wmi
.cmd
->sta_powersave_param_cmdid
);
809 ath10k_wmi_set_ap_ps_param(struct ath10k
*ar
, u32 vdev_id
, const u8
*mac
,
810 enum wmi_ap_ps_peer_param param_id
, u32 value
)
814 if (!ar
->wmi
.ops
->gen_set_ap_ps
)
817 skb
= ar
->wmi
.ops
->gen_set_ap_ps(ar
, vdev_id
, mac
, param_id
, value
);
821 return ath10k_wmi_cmd_send(ar
, skb
,
822 ar
->wmi
.cmd
->ap_ps_peer_param_cmdid
);
826 ath10k_wmi_scan_chan_list(struct ath10k
*ar
,
827 const struct wmi_scan_chan_list_arg
*arg
)
831 if (!ar
->wmi
.ops
->gen_scan_chan_list
)
834 skb
= ar
->wmi
.ops
->gen_scan_chan_list(ar
, arg
);
838 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->scan_chan_list_cmdid
);
842 ath10k_wmi_peer_assoc(struct ath10k
*ar
,
843 const struct wmi_peer_assoc_complete_arg
*arg
)
847 if (!ar
->wmi
.ops
->gen_peer_assoc
)
850 skb
= ar
->wmi
.ops
->gen_peer_assoc(ar
, arg
);
854 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->peer_assoc_cmdid
);
858 ath10k_wmi_beacon_send_ref_nowait(struct ath10k
*ar
, u32 vdev_id
,
859 const void *bcn
, size_t bcn_len
,
860 u32 bcn_paddr
, bool dtim_zero
,
866 if (!ar
->wmi
.ops
->gen_beacon_dma
)
869 skb
= ar
->wmi
.ops
->gen_beacon_dma(ar
, vdev_id
, bcn
, bcn_len
, bcn_paddr
,
870 dtim_zero
, deliver_cab
);
874 ret
= ath10k_wmi_cmd_send_nowait(ar
, skb
,
875 ar
->wmi
.cmd
->pdev_send_bcn_cmdid
);
885 ath10k_wmi_pdev_set_wmm_params(struct ath10k
*ar
,
886 const struct wmi_wmm_params_all_arg
*arg
)
890 if (!ar
->wmi
.ops
->gen_pdev_set_wmm
)
893 skb
= ar
->wmi
.ops
->gen_pdev_set_wmm(ar
, arg
);
897 return ath10k_wmi_cmd_send(ar
, skb
,
898 ar
->wmi
.cmd
->pdev_set_wmm_params_cmdid
);
902 ath10k_wmi_request_stats(struct ath10k
*ar
, u32 stats_mask
)
906 if (!ar
->wmi
.ops
->gen_request_stats
)
909 skb
= ar
->wmi
.ops
->gen_request_stats(ar
, stats_mask
);
913 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->request_stats_cmdid
);
917 ath10k_wmi_force_fw_hang(struct ath10k
*ar
,
918 enum wmi_force_fw_hang_type type
, u32 delay_ms
)
922 if (!ar
->wmi
.ops
->gen_force_fw_hang
)
925 skb
= ar
->wmi
.ops
->gen_force_fw_hang(ar
, type
, delay_ms
);
929 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->force_fw_hang_cmdid
);
933 ath10k_wmi_dbglog_cfg(struct ath10k
*ar
, u32 module_enable
, u32 log_level
)
937 if (!ar
->wmi
.ops
->gen_dbglog_cfg
)
940 skb
= ar
->wmi
.ops
->gen_dbglog_cfg(ar
, module_enable
, log_level
);
944 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->dbglog_cfg_cmdid
);
948 ath10k_wmi_pdev_pktlog_enable(struct ath10k
*ar
, u32 filter
)
952 if (!ar
->wmi
.ops
->gen_pktlog_enable
)
955 skb
= ar
->wmi
.ops
->gen_pktlog_enable(ar
, filter
);
959 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->pdev_pktlog_enable_cmdid
);
963 ath10k_wmi_pdev_pktlog_disable(struct ath10k
*ar
)
967 if (!ar
->wmi
.ops
->gen_pktlog_disable
)
970 skb
= ar
->wmi
.ops
->gen_pktlog_disable(ar
);
974 return ath10k_wmi_cmd_send(ar
, skb
,
975 ar
->wmi
.cmd
->pdev_pktlog_disable_cmdid
);
979 ath10k_wmi_pdev_set_quiet_mode(struct ath10k
*ar
, u32 period
, u32 duration
,
980 u32 next_offset
, u32 enabled
)
984 if (!ar
->wmi
.ops
->gen_pdev_set_quiet_mode
)
987 skb
= ar
->wmi
.ops
->gen_pdev_set_quiet_mode(ar
, period
, duration
,
988 next_offset
, enabled
);
992 return ath10k_wmi_cmd_send(ar
, skb
,
993 ar
->wmi
.cmd
->pdev_set_quiet_mode_cmdid
);
997 ath10k_wmi_pdev_get_temperature(struct ath10k
*ar
)
1001 if (!ar
->wmi
.ops
->gen_pdev_get_temperature
)
1004 skb
= ar
->wmi
.ops
->gen_pdev_get_temperature(ar
);
1006 return PTR_ERR(skb
);
1008 return ath10k_wmi_cmd_send(ar
, skb
,
1009 ar
->wmi
.cmd
->pdev_get_temperature_cmdid
);
1013 ath10k_wmi_addba_clear_resp(struct ath10k
*ar
, u32 vdev_id
, const u8
*mac
)
1015 struct sk_buff
*skb
;
1017 if (!ar
->wmi
.ops
->gen_addba_clear_resp
)
1020 skb
= ar
->wmi
.ops
->gen_addba_clear_resp(ar
, vdev_id
, mac
);
1022 return PTR_ERR(skb
);
1024 return ath10k_wmi_cmd_send(ar
, skb
,
1025 ar
->wmi
.cmd
->addba_clear_resp_cmdid
);
1029 ath10k_wmi_addba_send(struct ath10k
*ar
, u32 vdev_id
, const u8
*mac
,
1030 u32 tid
, u32 buf_size
)
1032 struct sk_buff
*skb
;
1034 if (!ar
->wmi
.ops
->gen_addba_send
)
1037 skb
= ar
->wmi
.ops
->gen_addba_send(ar
, vdev_id
, mac
, tid
, buf_size
);
1039 return PTR_ERR(skb
);
1041 return ath10k_wmi_cmd_send(ar
, skb
,
1042 ar
->wmi
.cmd
->addba_send_cmdid
);
1046 ath10k_wmi_addba_set_resp(struct ath10k
*ar
, u32 vdev_id
, const u8
*mac
,
1047 u32 tid
, u32 status
)
1049 struct sk_buff
*skb
;
1051 if (!ar
->wmi
.ops
->gen_addba_set_resp
)
1054 skb
= ar
->wmi
.ops
->gen_addba_set_resp(ar
, vdev_id
, mac
, tid
, status
);
1056 return PTR_ERR(skb
);
1058 return ath10k_wmi_cmd_send(ar
, skb
,
1059 ar
->wmi
.cmd
->addba_set_resp_cmdid
);
1063 ath10k_wmi_delba_send(struct ath10k
*ar
, u32 vdev_id
, const u8
*mac
,
1064 u32 tid
, u32 initiator
, u32 reason
)
1066 struct sk_buff
*skb
;
1068 if (!ar
->wmi
.ops
->gen_delba_send
)
1071 skb
= ar
->wmi
.ops
->gen_delba_send(ar
, vdev_id
, mac
, tid
, initiator
,
1074 return PTR_ERR(skb
);
1076 return ath10k_wmi_cmd_send(ar
, skb
,
1077 ar
->wmi
.cmd
->delba_send_cmdid
);
1081 ath10k_wmi_bcn_tmpl(struct ath10k
*ar
, u32 vdev_id
, u32 tim_ie_offset
,
1082 struct sk_buff
*bcn
, u32 prb_caps
, u32 prb_erp
,
1083 void *prb_ies
, size_t prb_ies_len
)
1085 struct sk_buff
*skb
;
1087 if (!ar
->wmi
.ops
->gen_bcn_tmpl
)
1090 skb
= ar
->wmi
.ops
->gen_bcn_tmpl(ar
, vdev_id
, tim_ie_offset
, bcn
,
1091 prb_caps
, prb_erp
, prb_ies
,
1094 return PTR_ERR(skb
);
1096 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->bcn_tmpl_cmdid
);
1100 ath10k_wmi_prb_tmpl(struct ath10k
*ar
, u32 vdev_id
, struct sk_buff
*prb
)
1102 struct sk_buff
*skb
;
1104 if (!ar
->wmi
.ops
->gen_prb_tmpl
)
1107 skb
= ar
->wmi
.ops
->gen_prb_tmpl(ar
, vdev_id
, prb
);
1109 return PTR_ERR(skb
);
1111 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->prb_tmpl_cmdid
);
1115 ath10k_wmi_p2p_go_bcn_ie(struct ath10k
*ar
, u32 vdev_id
, const u8
*p2p_ie
)
1117 struct sk_buff
*skb
;
1119 if (!ar
->wmi
.ops
->gen_p2p_go_bcn_ie
)
1122 skb
= ar
->wmi
.ops
->gen_p2p_go_bcn_ie(ar
, vdev_id
, p2p_ie
);
1124 return PTR_ERR(skb
);
1126 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->p2p_go_set_beacon_ie
);
1130 ath10k_wmi_sta_keepalive(struct ath10k
*ar
,
1131 const struct wmi_sta_keepalive_arg
*arg
)
1133 struct sk_buff
*skb
;
1136 if (!ar
->wmi
.ops
->gen_sta_keepalive
)
1139 skb
= ar
->wmi
.ops
->gen_sta_keepalive(ar
, arg
);
1141 return PTR_ERR(skb
);
1143 cmd_id
= ar
->wmi
.cmd
->sta_keepalive_cmd
;
1144 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1148 ath10k_wmi_wow_enable(struct ath10k
*ar
)
1150 struct sk_buff
*skb
;
1153 if (!ar
->wmi
.ops
->gen_wow_enable
)
1156 skb
= ar
->wmi
.ops
->gen_wow_enable(ar
);
1158 return PTR_ERR(skb
);
1160 cmd_id
= ar
->wmi
.cmd
->wow_enable_cmdid
;
1161 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1165 ath10k_wmi_wow_add_wakeup_event(struct ath10k
*ar
, u32 vdev_id
,
1166 enum wmi_wow_wakeup_event event
,
1169 struct sk_buff
*skb
;
1172 if (!ar
->wmi
.ops
->gen_wow_add_wakeup_event
)
1175 skb
= ar
->wmi
.ops
->gen_wow_add_wakeup_event(ar
, vdev_id
, event
, enable
);
1177 return PTR_ERR(skb
);
1179 cmd_id
= ar
->wmi
.cmd
->wow_enable_disable_wake_event_cmdid
;
1180 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1184 ath10k_wmi_wow_host_wakeup_ind(struct ath10k
*ar
)
1186 struct sk_buff
*skb
;
1189 if (!ar
->wmi
.ops
->gen_wow_host_wakeup_ind
)
1192 skb
= ar
->wmi
.ops
->gen_wow_host_wakeup_ind(ar
);
1194 return PTR_ERR(skb
);
1196 cmd_id
= ar
->wmi
.cmd
->wow_hostwakeup_from_sleep_cmdid
;
1197 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1201 ath10k_wmi_wow_add_pattern(struct ath10k
*ar
, u32 vdev_id
, u32 pattern_id
,
1202 const u8
*pattern
, const u8
*mask
,
1203 int pattern_len
, int pattern_offset
)
1205 struct sk_buff
*skb
;
1208 if (!ar
->wmi
.ops
->gen_wow_add_pattern
)
1211 skb
= ar
->wmi
.ops
->gen_wow_add_pattern(ar
, vdev_id
, pattern_id
,
1212 pattern
, mask
, pattern_len
,
1215 return PTR_ERR(skb
);
1217 cmd_id
= ar
->wmi
.cmd
->wow_add_wake_pattern_cmdid
;
1218 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1222 ath10k_wmi_wow_del_pattern(struct ath10k
*ar
, u32 vdev_id
, u32 pattern_id
)
1224 struct sk_buff
*skb
;
1227 if (!ar
->wmi
.ops
->gen_wow_del_pattern
)
1230 skb
= ar
->wmi
.ops
->gen_wow_del_pattern(ar
, vdev_id
, pattern_id
);
1232 return PTR_ERR(skb
);
1234 cmd_id
= ar
->wmi
.cmd
->wow_del_wake_pattern_cmdid
;
1235 return ath10k_wmi_cmd_send(ar
, skb
, cmd_id
);
1239 ath10k_wmi_update_fw_tdls_state(struct ath10k
*ar
, u32 vdev_id
,
1240 enum wmi_tdls_state state
)
1242 struct sk_buff
*skb
;
1244 if (!ar
->wmi
.ops
->gen_update_fw_tdls_state
)
1247 skb
= ar
->wmi
.ops
->gen_update_fw_tdls_state(ar
, vdev_id
, state
);
1249 return PTR_ERR(skb
);
1251 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->tdls_set_state_cmdid
);
1255 ath10k_wmi_tdls_peer_update(struct ath10k
*ar
,
1256 const struct wmi_tdls_peer_update_cmd_arg
*arg
,
1257 const struct wmi_tdls_peer_capab_arg
*cap
,
1258 const struct wmi_channel_arg
*chan
)
1260 struct sk_buff
*skb
;
1262 if (!ar
->wmi
.ops
->gen_tdls_peer_update
)
1265 skb
= ar
->wmi
.ops
->gen_tdls_peer_update(ar
, arg
, cap
, chan
);
1267 return PTR_ERR(skb
);
1269 return ath10k_wmi_cmd_send(ar
, skb
,
1270 ar
->wmi
.cmd
->tdls_peer_update_cmdid
);
1274 ath10k_wmi_adaptive_qcs(struct ath10k
*ar
, bool enable
)
1276 struct sk_buff
*skb
;
1278 if (!ar
->wmi
.ops
->gen_adaptive_qcs
)
1281 skb
= ar
->wmi
.ops
->gen_adaptive_qcs(ar
, enable
);
1283 return PTR_ERR(skb
);
1285 return ath10k_wmi_cmd_send(ar
, skb
, ar
->wmi
.cmd
->adaptive_qcs_cmdid
);
1289 ath10k_wmi_pdev_get_tpc_config(struct ath10k
*ar
, u32 param
)
1291 struct sk_buff
*skb
;
1293 if (!ar
->wmi
.ops
->gen_pdev_get_tpc_config
)
1296 skb
= ar
->wmi
.ops
->gen_pdev_get_tpc_config(ar
, param
);
1299 return PTR_ERR(skb
);
1301 return ath10k_wmi_cmd_send(ar
, skb
,
1302 ar
->wmi
.cmd
->pdev_get_tpc_config_cmdid
);
1306 ath10k_wmi_fw_stats_fill(struct ath10k
*ar
, struct ath10k_fw_stats
*fw_stats
,
1309 if (!ar
->wmi
.ops
->fw_stats_fill
)
1312 ar
->wmi
.ops
->fw_stats_fill(ar
, fw_stats
, buf
);
1317 ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k
*ar
, u8 enable
,
1318 u32 detect_level
, u32 detect_margin
)
1320 struct sk_buff
*skb
;
1322 if (!ar
->wmi
.ops
->gen_pdev_enable_adaptive_cca
)
1325 skb
= ar
->wmi
.ops
->gen_pdev_enable_adaptive_cca(ar
, enable
,
1330 return PTR_ERR(skb
);
1332 return ath10k_wmi_cmd_send(ar
, skb
,
1333 ar
->wmi
.cmd
->pdev_enable_adaptive_cca_cmdid
);