/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "htc.h"

static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
{
	switch (wmi_cmd) {
	case WMI_ECHO_CMDID:
		return "WMI_ECHO_CMDID";
	case WMI_ACCESS_MEMORY_CMDID:
		return "WMI_ACCESS_MEMORY_CMDID";
	case WMI_GET_FW_VERSION:
		return "WMI_GET_FW_VERSION";
	case WMI_DISABLE_INTR_CMDID:
		return "WMI_DISABLE_INTR_CMDID";
	case WMI_ENABLE_INTR_CMDID:
		return "WMI_ENABLE_INTR_CMDID";
	case WMI_ATH_INIT_CMDID:
		return "WMI_ATH_INIT_CMDID";
	case WMI_ABORT_TXQ_CMDID:
		return "WMI_ABORT_TXQ_CMDID";
	case WMI_STOP_TX_DMA_CMDID:
		return "WMI_STOP_TX_DMA_CMDID";
	case WMI_ABORT_TX_DMA_CMDID:
		return "WMI_ABORT_TX_DMA_CMDID";
	case WMI_DRAIN_TXQ_CMDID:
		return "WMI_DRAIN_TXQ_CMDID";
	case WMI_DRAIN_TXQ_ALL_CMDID:
		return "WMI_DRAIN_TXQ_ALL_CMDID";
	case WMI_START_RECV_CMDID:
		return "WMI_START_RECV_CMDID";
	case WMI_STOP_RECV_CMDID:
		return "WMI_STOP_RECV_CMDID";
	case WMI_FLUSH_RECV_CMDID:
		return "WMI_FLUSH_RECV_CMDID";
	case WMI_SET_MODE_CMDID:
		return "WMI_SET_MODE_CMDID";
	case WMI_NODE_CREATE_CMDID:
		return "WMI_NODE_CREATE_CMDID";
	case WMI_NODE_REMOVE_CMDID:
		return "WMI_NODE_REMOVE_CMDID";
	case WMI_VAP_REMOVE_CMDID:
		return "WMI_VAP_REMOVE_CMDID";
	case WMI_VAP_CREATE_CMDID:
		return "WMI_VAP_CREATE_CMDID";
	case WMI_REG_READ_CMDID:
		return "WMI_REG_READ_CMDID";
	case WMI_REG_WRITE_CMDID:
		return "WMI_REG_WRITE_CMDID";
	case WMI_REG_RMW_CMDID:
		return "WMI_REG_RMW_CMDID";
	case WMI_RC_STATE_CHANGE_CMDID:
		return "WMI_RC_STATE_CHANGE_CMDID";
	case WMI_RC_RATE_UPDATE_CMDID:
		return "WMI_RC_RATE_UPDATE_CMDID";
	case WMI_TARGET_IC_UPDATE_CMDID:
		return "WMI_TARGET_IC_UPDATE_CMDID";
	case WMI_TX_AGGR_ENABLE_CMDID:
		return "WMI_TX_AGGR_ENABLE_CMDID";
	case WMI_TGT_DETACH_CMDID:
		return "WMI_TGT_DETACH_CMDID";
	case WMI_NODE_UPDATE_CMDID:
		return "WMI_NODE_UPDATE_CMDID";
	case WMI_INT_STATS_CMDID:
		return "WMI_INT_STATS_CMDID";
	case WMI_TX_STATS_CMDID:
		return "WMI_TX_STATS_CMDID";
	case WMI_RX_STATS_CMDID:
		return "WMI_RX_STATS_CMDID";
	case WMI_BITRATE_MASK_CMDID:
		return "WMI_BITRATE_MASK_CMDID";
	}

	return "Bogus";
}
struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi;

	wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
	if (!wmi)
		return NULL;

	wmi->drv_priv = priv;
	wmi->stopped = false;
	skb_queue_head_init(&wmi->wmi_event_queue);
	spin_lock_init(&wmi->wmi_lock);
	spin_lock_init(&wmi->event_lock);
	mutex_init(&wmi->op_mutex);
	mutex_init(&wmi->multi_write_mutex);
	mutex_init(&wmi->multi_rmw_mutex);
	init_completion(&wmi->cmd_wait);
	INIT_LIST_HEAD(&wmi->pending_tx_events);
	tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);

	return wmi;
}
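/*
 * Illustrative lifecycle sketch (assumption: the real call sites live in
 * the HTC probe/teardown paths, e.g. htc_drv_init.c; exact names and
 * ordering there may differ):
 *
 *	priv->wmi = ath9k_init_wmi(priv);
 *	ath9k_wmi_connect(htc_handle, priv->wmi, &wmi_ctrl_epid);
 *	...
 *	ath9k_wmi_event_drain(priv);
 *	ath9k_stop_wmi(priv);
 *	ath9k_destroy_wmi(priv);
 */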
void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
{
	struct wmi *wmi = priv->wmi;

	mutex_lock(&wmi->op_mutex);
	wmi->stopped = true;
	mutex_unlock(&wmi->op_mutex);
}
void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
{
	kfree(priv->wmi);
}
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
{
	unsigned long flags;

	tasklet_kill(&priv->wmi->wmi_event_tasklet);
	spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
	__skb_queue_purge(&priv->wmi->wmi_event_queue);
	spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}
void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
{
	struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
	struct ath9k_htc_priv *priv = wmi->drv_priv;
	struct wmi_cmd_hdr *hdr;
	void *wmi_event;
	struct wmi_event_swba *swba;
	struct sk_buff *skb = NULL;
	unsigned long flags;
	u16 cmd_id;

	do {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		skb = __skb_dequeue(&wmi->wmi_event_queue);
		if (!skb) {
			spin_unlock_irqrestore(&wmi->wmi_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);

		/* Check if ath9k_htc_probe_device() completed. */
		if (!data_race(priv->initialized)) {
			kfree_skb(skb);
			continue;
		}

		hdr = (struct wmi_cmd_hdr *) skb->data;
		cmd_id = be16_to_cpu(hdr->command_id);
		wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));

		switch (cmd_id) {
		case WMI_SWBA_EVENTID:
			swba = wmi_event;
			ath9k_htc_swba(priv, swba);
			break;
		case WMI_FATAL_EVENTID:
			ieee80211_queue_work(wmi->drv_priv->hw,
					     &wmi->drv_priv->fatal_work);
			break;
		case WMI_TXSTATUS_EVENTID:
			spin_lock_bh(&priv->tx.tx_lock);
			if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
				spin_unlock_bh(&priv->tx.tx_lock);
				break;
			}
			spin_unlock_bh(&priv->tx.tx_lock);

			ath9k_htc_txstatus(priv, wmi_event);
			break;
		default:
			break;
		}

		kfree_skb(skb);
	} while (1);
}
void ath9k_fatal_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
						   fatal_work);
	struct ath_common *common = ath9k_hw_common(priv->ah);

	ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
	ath9k_htc_reset(priv);
}
static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
{
	skb_pull(skb, sizeof(struct wmi_cmd_hdr));

	if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
		memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);

	complete(&wmi->cmd_wait);
}
static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid)
{
	struct wmi *wmi = priv;
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;
	u16 cmd_id;

	if (unlikely(wmi->stopped))
		goto free_skb;

	/* Validate the obtained SKB. */
	if (unlikely(skb->len < sizeof(struct wmi_cmd_hdr)))
		goto free_skb;

	hdr = (struct wmi_cmd_hdr *) skb->data;
	cmd_id = be16_to_cpu(hdr->command_id);

	if (cmd_id & 0x1000) {
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		__skb_queue_tail(&wmi->wmi_event_queue, skb);
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		tasklet_schedule(&wmi->wmi_event_tasklet);
		return;
	}

	/* Check if there has been a timeout. */
	spin_lock_irqsave(&wmi->wmi_lock, flags);
	if (be16_to_cpu(hdr->seq_no) != wmi->last_seq_id) {
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		goto free_skb;
	}

	/* WMI command response */
	ath9k_wmi_rsp_callback(wmi, skb);
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

free_skb:
	kfree_skb(skb);
}
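/*
 * Note on the dispatch in ath9k_wmi_ctrl_rx() above: IDs with bit 12
 * (0x1000) set are asynchronous firmware events and are queued for the
 * event tasklet, while the rest are treated as responses to the command
 * currently pending in ath9k_wmi_cmd(). The values below are
 * illustrative only, not actual IDs from this driver:
 *
 *	0x0002 -> command response, completes the pending ath9k_wmi_cmd()
 *	0x1004 -> WMI event, handled in ath9k_wmi_event_tasklet()
 */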
static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
			      enum htc_endpoint_id epid, bool txok)
{
	kfree_skb(skb);
}
int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
		      enum htc_endpoint_id *wmi_ctrl_epid)
{
	struct htc_service_connreq connect;
	int ret;

	wmi->htc = htc;

	memset(&connect, 0, sizeof(connect));

	connect.ep_callbacks.priv = wmi;
	connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
	connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
	connect.service_id = WMI_CONTROL_SVC;

	ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
	if (ret)
		return ret;

	*wmi_ctrl_epid = wmi->ctrl_epid;

	return 0;
}
static int ath9k_wmi_cmd_issue(struct wmi *wmi,
			       struct sk_buff *skb,
			       enum wmi_cmd_id cmd, u16 len,
			       u8 *rsp_buf, u32 rsp_len)
{
	struct wmi_cmd_hdr *hdr;
	unsigned long flags;

	hdr = skb_push(skb, sizeof(struct wmi_cmd_hdr));
	hdr->command_id = cpu_to_be16(cmd);
	hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);

	spin_lock_irqsave(&wmi->wmi_lock, flags);

	/* record the rsp buffer and length */
	wmi->cmd_rsp_buf = rsp_buf;
	wmi->cmd_rsp_len = rsp_len;

	/* launch the command */
	wmi->last_seq_id = wmi->tx_seq_id;
	spin_unlock_irqrestore(&wmi->wmi_lock, flags);

	return htc_send_epid(wmi->htc, skb, wmi->ctrl_epid);
}
int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
		  u8 *cmd_buf, u32 cmd_len,
		  u8 *rsp_buf, u32 rsp_len,
		  u32 timeout)
{
	struct ath_hw *ah = wmi->drv_priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u16 headroom = sizeof(struct htc_frame_hdr) +
		       sizeof(struct wmi_cmd_hdr);
	unsigned long time_left, flags;
	struct sk_buff *skb;
	int ret = 0;

	if (ah->ah_flags & AH_UNPLUGGED)
		return 0;

	skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, headroom);

	if (cmd_len != 0 && cmd_buf != NULL) {
		skb_put_data(skb, cmd_buf, cmd_len);
	}

	mutex_lock(&wmi->op_mutex);

	/* check if wmi stopped flag is set */
	if (unlikely(wmi->stopped)) {
		ret = -EPROTO;
		goto out;
	}

	ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len, rsp_buf, rsp_len);
	if (ret)
		goto out;

	time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
	if (!time_left) {
		ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
			wmi_cmd_to_name(cmd_id));
		spin_lock_irqsave(&wmi->wmi_lock, flags);
		wmi->last_seq_id = 0;
		spin_unlock_irqrestore(&wmi->wmi_lock, flags);
		mutex_unlock(&wmi->op_mutex);
		return -ETIMEDOUT;
	}

	mutex_unlock(&wmi->op_mutex);

	return 0;

out:
	ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
	mutex_unlock(&wmi->op_mutex);
	kfree_skb(skb);

	return ret;
}
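/*
 * Illustrative caller sketch (not part of this file, hence guarded out):
 * a hypothetical synchronous register read issued through ath9k_wmi_cmd().
 * The helper name, error handling, and buffer layout below are assumptions
 * for illustration only; the driver's real register ops live elsewhere.
 */
#if 0
static u32 example_wmi_reg_read(struct ath9k_htc_priv *priv, u32 reg_offset)
{
	__be32 buf = cpu_to_be32(reg_offset);	/* request: register offset */
	__be32 val;				/* response: register value */
	int ret;

	ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
			    (u8 *) &buf, sizeof(buf),
			    (u8 *) &val, sizeof(val),
			    HZ);
	if (ret)
		return 0;

	return be32_to_cpu(val);
}
#endif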