Linux 4.4.145
drivers/net/wireless/ath/ath10k/wmi-ops.h
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _WMI_OPS_H_
#define _WMI_OPS_H_
struct ath10k;
struct sk_buff;
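
/* Per-ABI WMI backend vtable. Each supported firmware WMI flavour (e.g. the
 * main and TLV variants implemented in wmi.c and wmi-tlv.c) provides its own
 * struct wmi_ops instance; the inline wrappers below dispatch through
 * ar->wmi.ops and return -EOPNOTSUPP when a backend leaves an op unset.
 */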
struct wmi_ops {
	void (*rx)(struct ath10k *ar, struct sk_buff *skb);
	void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);

	int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_scan_ev_arg *arg);
	int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_mgmt_rx_ev_arg *arg);
	int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_ch_info_ev_arg *arg);
	int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_vdev_start_ev_arg *arg);
	int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_peer_kick_ev_arg *arg);
	int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
			 struct wmi_swba_ev_arg *arg);
	int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
			       struct wmi_phyerr_hdr_arg *arg);
	int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
			   int left_len, struct wmi_phyerr_ev_arg *arg);
	int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_svc_rdy_ev_arg *arg);
	int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_rdy_ev_arg *arg);
	int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
			     struct ath10k_fw_stats *stats);
	int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
			    struct wmi_roam_ev_arg *arg);
	int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
			      struct wmi_wow_ev_arg *arg);
	enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);

	struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
	struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
					   u16 rd5g, u16 ctl2g, u16 ctl5g,
					   enum wmi_dfs_region dfs_reg);
	struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
					      u32 value);
	struct sk_buff *(*gen_init)(struct ath10k *ar);
	struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
					  const struct wmi_start_scan_arg *arg);
	struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
					 const struct wmi_stop_scan_arg *arg);
	struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
					   enum wmi_vdev_type type,
					   enum wmi_vdev_subtype subtype,
					   const u8 macaddr[ETH_ALEN]);
	struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
					  const struct wmi_vdev_start_request_arg *arg,
					  bool restart);
	struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
				       const u8 *bssid);
	struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
	struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
					      u32 param_id, u32 param_value);
	struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
						const struct wmi_vdev_install_key_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
						  const struct wmi_vdev_spectral_conf_arg *arg);
	struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
						    u32 trigger, u32 enable);
	struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
					     const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN],
					   enum wmi_peer_type peer_type);
	struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
					   const u8 peer_addr[ETH_ALEN]);
	struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
					  const u8 peer_addr[ETH_ALEN],
					  u32 tid_bitmap);
	struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
					      const u8 *peer_addr,
					      enum wmi_peer_param param_id,
					      u32 param_value);
	struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
					  const struct wmi_peer_assoc_complete_arg *arg);
	struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_ps_mode psmode);
	struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
					  enum wmi_sta_powersave_param param_id,
					  u32 value);
	struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
					 const u8 *mac,
					 enum wmi_ap_ps_peer_param param_id,
					 u32 value);
	struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
					      const struct wmi_scan_chan_list_arg *arg);
	struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
					  const void *bcn, size_t bcn_len,
					  u32 bcn_paddr, bool dtim_zero,
					  bool deliver_cab);
	struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
					    const struct wmi_wmm_params_all_arg *arg);
	struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
	struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
					     enum wmi_force_fw_hang_type type,
					     u32 delay_ms);
	struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
	struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
					  u32 log_level);
	struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
	struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
	struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
						   u32 period, u32 duration,
						   u32 next_offset,
						   u32 enabled);
	struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
	struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
						const u8 *mac);
	struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 buf_size);
	struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
					      const u8 *mac, u32 tid,
					      u32 status);
	struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
					  const u8 *mac, u32 tid, u32 initiator,
					  u32 reason);
	struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
					u32 tim_ie_offset, struct sk_buff *bcn,
					u32 prb_caps, u32 prb_erp,
					void *prb_ies, size_t prb_ies_len);
	struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
					struct sk_buff *bcn);
	struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
					     const u8 *p2p_ie);
	struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
					      const u8 peer_addr[ETH_ALEN],
					      const struct wmi_sta_uapsd_auto_trig_arg *args,
					      u32 num_ac);
	struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
					     const struct wmi_sta_keepalive_arg *arg);
	struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
						    enum wmi_wow_wakeup_event event,
						    u32 enable);
	struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
	struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id,
					       const u8 *pattern,
					       const u8 *mask,
					       int pattern_len,
					       int pattern_offset);
	struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
					       u32 pattern_id);
	struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
						    u32 vdev_id,
						    enum wmi_tdls_state state);
	struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
						const struct wmi_tdls_peer_update_cmd_arg *arg,
						const struct wmi_tdls_peer_capab_arg *cap,
						const struct wmi_channel_arg *chan);
	struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
	struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
						   u32 param);
	void (*fw_stats_fill)(struct ath10k *ar,
			      struct ath10k_fw_stats *fw_stats,
			      char *buf);
	struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
							u8 enable,
							u32 detect_level,
							u32 detect_margin);
};
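
/*
 * Illustrative sketch (not part of this header): a WMI backend fills in a
 * struct wmi_ops instance and the attach code points ar->wmi.ops at it,
 * roughly like so. The names below are simplified placeholders; see wmi.c
 * and wmi-tlv.c for the real op tables.
 *
 *	static const struct wmi_ops wmi_example_ops = {
 *		.rx		= ath10k_wmi_example_op_rx,
 *		.gen_init	= ath10k_wmi_example_op_gen_init,
 *		.gen_mgmt_tx	= ath10k_wmi_example_op_gen_mgmt_tx,
 *		...
 *	};
 *
 *	ar->wmi.ops = &wmi_example_ops;
 */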
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
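
/* Each wrapper below follows the same pattern: bail out with -EOPNOTSUPP if
 * the backend does not provide the op, let the op build the command buffer
 * (an skb or an ERR_PTR on failure), and hand the buffer to
 * ath10k_wmi_cmd_send() together with the ABI-specific command id taken
 * from ar->wmi.cmd.
 */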
static inline int
ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
{
	if (WARN_ON_ONCE(!ar->wmi.ops->rx))
		return -EOPNOTSUPP;

	ar->wmi.ops->rx(ar, skb);
	return 0;
}

static inline int
ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
		   size_t len)
{
	if (!ar->wmi.ops->map_svc)
		return -EOPNOTSUPP;

	ar->wmi.ops->map_svc(in, out, len);
	return 0;
}

static inline int
ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_scan_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_scan)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_scan(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_mgmt_rx_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_mgmt_rx)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_ch_info_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_ch_info)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_ch_info(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_vdev_start_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_vdev_start)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_peer_kick_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_peer_kick)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
		     struct wmi_swba_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_swba)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_swba(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
			   struct wmi_phyerr_hdr_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr_hdr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
		       int left_len, struct wmi_phyerr_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_phyerr)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
}

static inline int
ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_svc_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_svc_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
		    struct wmi_rdy_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_rdy)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_rdy(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
			 struct ath10k_fw_stats *stats)
{
	if (!ar->wmi.ops->pull_fw_stats)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
}

static inline int
ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
			struct wmi_roam_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_roam_ev)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
}

static inline int
ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
			  struct wmi_wow_ev_arg *arg)
{
	if (!ar->wmi.ops->pull_wow_event)
		return -EOPNOTSUPP;

	return ar->wmi.ops->pull_wow_event(ar, skb, arg);
}

static inline enum wmi_txbf_conf
ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
{
	if (!ar->wmi.ops->get_txbf_conf_scheme)
		return WMI_TXBF_CONF_UNSUPPORTED;

	return ar->wmi.ops->get_txbf_conf_scheme(ar);
}

static inline int
ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_mgmt_tx)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->mgmt_tx_cmdid);
	if (ret)
		return ret;

	/* FIXME There's no ACK event for Management Tx. This probably
	 * shouldn't be called here either. */
	info->flags |= IEEE80211_TX_STAT_ACK;
	ieee80211_tx_status_irqsafe(ar->hw, msdu);

	return 0;
}
static inline int
ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
			      u16 ctl2g, u16 ctl5g,
			      enum wmi_dfs_region dfs_reg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_rd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
					   dfs_reg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_regdomain_cmdid);
}

static inline int
ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_suspend)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}

static inline int
ath10k_wmi_pdev_resume_target(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_resume)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_resume(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}

static inline int
ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}

static inline int
ath10k_wmi_cmd_init(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_init)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_init(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
}

static inline int
ath10k_wmi_start_scan(struct ath10k *ar,
		      const struct wmi_start_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_start_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_start_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}

static inline int
ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_stop_scan)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_stop_scan(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}

static inline int
ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
		       enum wmi_vdev_type type,
		       enum wmi_vdev_subtype subtype,
		       const u8 macaddr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}

static inline int
ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}

static inline int
ath10k_wmi_vdev_start(struct ath10k *ar,
		      const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_start_request_cmdid);
}

static inline int
ath10k_wmi_vdev_restart(struct ath10k *ar,
			const struct wmi_vdev_start_request_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_start)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_restart_request_cmdid);
}

static inline int
ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_stop)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
static inline int
ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_up)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}

static inline int
ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_down)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}

static inline int
ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
			  u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}

static inline int
ath10k_wmi_vdev_install_key(struct ath10k *ar,
			    const struct wmi_vdev_install_key_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_vdev_install_key)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->vdev_install_key_cmdid);
}

static inline int
ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
			      const struct wmi_vdev_spectral_conf_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_spectral_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
						    enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
			  const u8 peer_addr[ETH_ALEN],
			  const struct wmi_sta_uapsd_auto_trig_arg *args,
			  u32 num_ac)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_sta_uapsd)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
					      num_ac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
			 const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_vdev_wmm_conf)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN],
		       enum wmi_peer_type peer_type)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_create)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}

static inline int
ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
		       const u8 peer_addr[ETH_ALEN])
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_delete)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}

static inline int
ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
		      const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_flush)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}

static inline int
ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
			  enum wmi_peer_param param_id, u32 param_value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_set_param)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
					      param_value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}

static inline int
ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
		      enum wmi_sta_ps_mode psmode)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_psmode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_mode_cmdid);
}

static inline int
ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
			    enum wmi_sta_powersave_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_sta_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->sta_powersave_param_cmdid);
}

static inline int
ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			   enum wmi_ap_ps_peer_param param_id, u32 value)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_set_ap_ps)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->ap_ps_peer_param_cmdid);
}

static inline int
ath10k_wmi_scan_chan_list(struct ath10k *ar,
			  const struct wmi_scan_chan_list_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_scan_chan_list)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}

static inline int
ath10k_wmi_peer_assoc(struct ath10k *ar,
		      const struct wmi_peer_assoc_complete_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_peer_assoc)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
				  const void *bcn, size_t bcn_len,
				  u32 bcn_paddr, bool dtim_zero,
				  bool deliver_cab)
{
	struct sk_buff *skb;
	int ret;

	if (!ar->wmi.ops->gen_beacon_dma)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
					  dtim_zero, deliver_cab);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	ret = ath10k_wmi_cmd_send_nowait(ar, skb,
					 ar->wmi.cmd->pdev_send_bcn_cmdid);
	if (ret) {
		dev_kfree_skb(skb);
		return ret;
	}

	return 0;
}

static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
			       const struct wmi_wmm_params_all_arg *arg)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_wmm)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}

static inline int
ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_request_stats)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}

static inline int
ath10k_wmi_force_fw_hang(struct ath10k *ar,
			 enum wmi_force_fw_hang_type type, u32 delay_ms)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_force_fw_hang)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}

static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_dbglog_cfg)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}

static inline int
ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pktlog_disable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pktlog_disable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}

static inline int
ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
			       u32 next_offset, u32 enabled)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
						   next_offset, enabled);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
}

static inline int
ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_temperature)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_temperature_cmdid);
}
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_clear_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_clear_resp_cmdid);
}

static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 buf_size)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_send_cmdid);
}

static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
			  u32 tid, u32 status)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_addba_set_resp)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->addba_set_resp_cmdid);
}

static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
		      u32 tid, u32 initiator, u32 reason)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_delba_send)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
					  reason);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->delba_send_cmdid);
}

static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
		    struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
		    void *prb_ies, size_t prb_ies_len)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_bcn_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
					prb_caps, prb_erp, prb_ies,
					prb_ies_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}

static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_prb_tmpl)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}

static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}

static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
			 const struct wmi_sta_keepalive_arg *arg)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_sta_keepalive)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_wow_enable(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_enable)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_enable(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
				enum wmi_wow_wakeup_event event,
				u32 enable)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_wakeup_event)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
			   const u8 *pattern, const u8 *mask,
			   int pattern_len, int pattern_offset)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_add_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
					       pattern, mask, pattern_len,
					       pattern_offset);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}

static inline int
ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
{
	struct sk_buff *skb;
	u32 cmd_id;

	if (!ar->wmi.ops->gen_wow_del_pattern)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
	return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
				enum wmi_tdls_state state)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_update_fw_tdls_state)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
}

static inline int
ath10k_wmi_tdls_peer_update(struct ath10k *ar,
			    const struct wmi_tdls_peer_update_cmd_arg *arg,
			    const struct wmi_tdls_peer_capab_arg *cap,
			    const struct wmi_channel_arg *chan)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_tdls_peer_update)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->tdls_peer_update_cmdid);
}

static inline int
ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_adaptive_qcs)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
}

static inline int
ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_get_tpc_config)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_get_tpc_config_cmdid);
}

static inline int
ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
			 char *buf)
{
	if (!ar->wmi.ops->fw_stats_fill)
		return -EOPNOTSUPP;

	ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
	return 0;
}

static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
				    u32 detect_level, u32 detect_margin)
{
	struct sk_buff *skb;

	if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
		return -EOPNOTSUPP;

	skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
							detect_level,
							detect_margin);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	return ath10k_wmi_cmd_send(ar, skb,
				   ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
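
/*
 * Typical call-site sketch (illustrative, not taken from this file): the
 * mac80211 glue invokes these wrappers and treats -EOPNOTSUPP as "feature
 * not available for this firmware ABI", e.g.:
 *
 *	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
 *				     arvif->vdev_subtype, vif->addr);
 *	if (ret)
 *		ath10k_warn(ar, "failed to create WMI vdev: %d\n", ret);
 */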
#endif