drivers/net/wireless/ath/ath10k/bmi.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
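
/* Begin a BMI phase: clear the host-side done_sent flag so the BMI
 * helpers below will accept commands again.
 */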
void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
	ar->bmi.done_sent = false;
}
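
/* Send BMI_DONE to end the BMI phase. done_sent is set first, so any
 * further BMI command issued through these helpers is rejected.
 */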
int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_dbg(ATH10K_DBG_CORE, "%s skipped\n", __func__);
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn("unable to write to the device: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n");
	return 0;
}
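
/* Query the target for its version and type via BMI_GET_TARGET_INFO.
 * Disallowed (-EBUSY) once BMI_DONE has been sent.
 */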
int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn("invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);
	return 0;
}
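
/* Read `length` bytes of target memory starting at `address` into
 * `buffer`, splitting the transfer into chunks of at most
 * BMI_MAX_DATA_SIZE bytes.
 */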
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
		   __func__, ar, address, length);

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn("unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
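
/* Write `length` bytes from `buffer` to target memory at `address` in
 * chunks. The on-the-wire length is rounded up to a multiple of 4 bytes,
 * but the copy from `buffer` never reads past `length`.
 */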
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
		   __func__, ar, address, length);

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn("unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}
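
/* Ask the target to execute code at `address`, passing *param as the
 * argument. On success *param is overwritten with the result returned
 * by the target.
 */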
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
		   __func__, ar, address, *param);

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(*param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn("invalid execute response length (%d)\n",
			    resplen);
		/* ret is 0 here; report the short response as an I/O error */
		return -EIO;
	}

	*param = __le32_to_cpu(resp.execute.result);
	return 0;
}
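
/* Stream a block of LZ-compressed data to the target in chunks. Each
 * chunk length is expected to be a multiple of 4 bytes (see the
 * WARN_ON_ONCE below).
 */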
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn("unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}
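
/* Open an LZ stream at the given target address, so that subsequent
 * BMI_LZ_DATA transfers land there.
 */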
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn("unable to Start LZ Stream to the device\n");
		return ret;
	}

	return 0;
}
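
/* Download an image using the LZ stream commands: send the 4-byte
 * aligned head, then a zero-padded trailer for any remaining bytes,
 * and finally restart the stream at address 0 to flush target caches.
 */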
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	ret = ath10k_bmi_lz_data(ar, buffer, head_len);
	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}
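
/*
 * Rough sketch of how these helpers might be sequenced by a caller when
 * booting the target (illustrative only; the real flow lives in the
 * callers, and names such as load_addr, entry_addr, fw and param below
 * are placeholders, not identifiers from this driver):
 *
 *	ath10k_bmi_start(ar);
 *	ath10k_bmi_get_target_info(ar, &target_info);
 *	ath10k_bmi_fast_download(ar, load_addr, fw->data, fw->size);
 *	ath10k_bmi_execute(ar, entry_addr, &param);
 *	ath10k_bmi_done(ar);
 */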