drivers/net/wireless/ath/ath6kl/bmi.c

/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"

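/*
 * BMI (Bootloader Messaging Interface) is the simple command protocol the
 * host uses to talk to the target before its firmware is running, mainly
 * to download and start that firmware. Each command below is framed as a
 * 32-bit command id (cid), optionally followed by an address, a length
 * and payload, and is pushed through the HIF layer (SDIO or USB). Once
 * BMI_DONE has been sent (ar->bmi.done_sent), every other command is
 * refused with -EACCES.
 */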
int ath6kl_bmi_done(struct ath6kl *ar)
{
        int ret;
        u32 cid = BMI_DONE;

        if (ar->bmi.done_sent) {
                ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
                return 0;
        }

        ar->bmi.done_sent = true;

        ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
        if (ret) {
                ath6kl_err("Unable to send bmi done: %d\n", ret);
                return ret;
        }

        return 0;
}

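/*
 * Query the target's version/type block. USB targets return the whole
 * ath6kl_bmi_target_info structure in one transfer; other (SDIO) targets
 * first return only the version word and, when it equals
 * TARGET_VERSION_SENTINAL, follow up with a byte count and the remaining
 * fields, which is handled below for backwards compatibility.
 */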
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
                               struct ath6kl_bmi_target_info *targ_info)
{
        int ret;
        u32 cid = BMI_GET_TARGET_INFO;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
        if (ret) {
                ath6kl_err("Unable to send get target info: %d\n", ret);
                return ret;
        }

        if (ar->hif_type == ATH6KL_HIF_TYPE_USB) {
                ret = ath6kl_hif_bmi_read(ar, (u8 *)targ_info,
                                          sizeof(*targ_info));
        } else {
                ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
                                          sizeof(targ_info->version));
        }

        if (ret) {
                ath6kl_err("Unable to recv target info: %d\n", ret);
                return ret;
        }

        if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
                /* Determine how many bytes are in the Target's targ_info */
                ret = ath6kl_hif_bmi_read(ar,
                                          (u8 *)&targ_info->byte_count,
                                          sizeof(targ_info->byte_count));
                if (ret) {
                        ath6kl_err("unable to read target info byte count: %d\n",
                                   ret);
                        return ret;
                }

                /*
                 * The target's targ_info doesn't match the host's targ_info.
                 * We need to do some backwards compatibility to make this work.
                 */
                if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
                        WARN_ON(1);
                        return -EINVAL;
                }

                /* Read the remainder of the targ_info */
                ret = ath6kl_hif_bmi_read(ar,
                                          ((u8 *)targ_info) +
                                          sizeof(targ_info->byte_count),
                                          sizeof(*targ_info) -
                                          sizeof(targ_info->byte_count));

                if (ret) {
                        ath6kl_err("Unable to read target info (%d bytes): %d\n",
                                   targ_info->byte_count, ret);
                        return ret;
                }
        }

        ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
                   targ_info->version, targ_info->type);

        return 0;
}

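/*
 * Read 'len' bytes of target memory starting at 'addr' into 'buf'. The
 * transfer is split into chunks of at most ar->bmi.max_data_size; each
 * chunk is requested with a cid/addr/length header assembled in
 * ar->bmi.cmd_buf and the data is read back through the same buffer.
 */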
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
        u32 cid = BMI_READ_MEMORY;
        int ret;
        u32 offset;
        u32 len_remain, rx_len;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI,
                   "bmi read memory: device: addr: 0x%x, len: %d\n",
                   addr, len);

        len_remain = len;

        while (len_remain) {
                rx_len = (len_remain < ar->bmi.max_data_size) ?
                                        len_remain : ar->bmi.max_data_size;
                offset = 0;
                memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
                offset += sizeof(addr);
                memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
                offset += sizeof(len);

                ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
                if (ret) {
                        ath6kl_err("Unable to write to the device: %d\n",
                                   ret);
                        return ret;
                }
                ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
                if (ret) {
                        ath6kl_err("Unable to read from the device: %d\n",
                                   ret);
                        return ret;
                }
                memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
                len_remain -= rx_len; addr += rx_len;
        }

        return 0;
}

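/*
 * Write 'len' bytes from 'buf' to target memory at 'addr', again chunked
 * so that the cid/addr/length header plus payload fits within
 * ar->bmi.max_data_size. A trailing chunk whose length is not a multiple
 * of four is padded up via the on-stack aligned_buf bounce buffer before
 * being sent.
 */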
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
        u32 cid = BMI_WRITE_MEMORY;
        int ret;
        u32 offset;
        u32 len_remain, tx_len;
        const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
        u8 aligned_buf[400];
        u8 *src;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }

        if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
                return -E2BIG;

        memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);

        ath6kl_dbg(ATH6KL_DBG_BMI,
                   "bmi write memory: addr: 0x%x, len: %d\n", addr, len);

        len_remain = len;
        while (len_remain) {
                src = &buf[len - len_remain];

                if (len_remain < (ar->bmi.max_data_size - header)) {
                        if (len_remain & 3) {
                                /* align it with 4 bytes */
                                len_remain = len_remain +
                                             (4 - (len_remain & 3));
                                memcpy(aligned_buf, src, len_remain);
                                src = aligned_buf;
                        }
                        tx_len = len_remain;
                } else {
                        tx_len = (ar->bmi.max_data_size - header);
                }

                offset = 0;
                memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
                offset += sizeof(addr);
                memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
                offset += sizeof(tx_len);
                memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
                offset += tx_len;

                ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
                if (ret) {
                        ath6kl_err("Unable to write to the device: %d\n",
                                   ret);
                        return ret;
                }
                len_remain -= tx_len; addr += tx_len;
        }

        return 0;
}

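/*
 * The remaining single-shot commands (execute, set app start address,
 * SoC register read/write) all follow the same pattern: assemble
 * cid + arguments in ar->bmi.cmd_buf, write it out over HIF and, where
 * a result is expected, read it back into the same buffer.
 */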
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
        u32 cid = BMI_EXECUTE;
        int ret;
        u32 offset;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = sizeof(cid) + sizeof(addr) + sizeof(param);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d)\n",
                   addr, *param);

        offset = 0;
        memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
        offset += sizeof(addr);
        memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
        offset += sizeof(*param);

        ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
        if (ret) {
                ath6kl_err("Unable to write to the device: %d\n", ret);
                return ret;
        }

        ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
        if (ret) {
                ath6kl_err("Unable to read from the device: %d\n", ret);
                return ret;
        }

        memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

        return 0;
}

int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
        u32 cid = BMI_SET_APP_START;
        int ret;
        u32 offset;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = sizeof(cid) + sizeof(addr);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);

        offset = 0;
        memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
        offset += sizeof(addr);

        ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
        if (ret) {
                ath6kl_err("Unable to write to the device: %d\n", ret);
                return ret;
        }

        return 0;
}

int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
        u32 cid = BMI_READ_SOC_REGISTER;
        int ret;
        u32 offset;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = sizeof(cid) + sizeof(addr);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);

        offset = 0;
        memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
        offset += sizeof(addr);

        ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
        if (ret) {
                ath6kl_err("Unable to write to the device: %d\n", ret);
                return ret;
        }

        ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
        if (ret) {
                ath6kl_err("Unable to read from the device: %d\n", ret);
                return ret;
        }
        memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

        return 0;
}

int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
        u32 cid = BMI_WRITE_SOC_REGISTER;
        int ret;
        u32 offset;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = sizeof(cid) + sizeof(addr) + sizeof(param);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI,
                   "bmi write SOC reg: addr: 0x%x, param: %d\n",
                   addr, param);

        offset = 0;
        memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
        offset += sizeof(addr);
        memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
        offset += sizeof(param);

        ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
        if (ret) {
                ath6kl_err("Unable to write to the device: %d\n", ret);
                return ret;
        }

        return 0;
}

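/*
 * The LZ commands below drive the target's built-in decompressor:
 * ath6kl_bmi_lz_stream_start() points an LZ stream at a target address
 * and ath6kl_bmi_lz_data() feeds it compressed data in chunks, so a
 * compressed firmware image can be expanded on the target itself.
 */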
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
        u32 cid = BMI_LZ_DATA;
        int ret;
        u32 offset;
        u32 len_remain, tx_len;
        const u32 header = sizeof(cid) + sizeof(len);
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = ar->bmi.max_data_size + header;
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d)\n",
                   len);

        len_remain = len;
        while (len_remain) {
                tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
                          len_remain : (ar->bmi.max_data_size - header);

                offset = 0;
                memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
                offset += sizeof(cid);
                memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
                offset += sizeof(tx_len);
                memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
                       tx_len);
                offset += tx_len;

                ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
                if (ret) {
                        ath6kl_err("Unable to write to the device: %d\n",
                                   ret);
                        return ret;
                }

                len_remain -= tx_len;
        }

        return 0;
}

int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
        u32 cid = BMI_LZ_STREAM_START;
        int ret;
        u32 offset;
        u16 size;

        if (ar->bmi.done_sent) {
                ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
                return -EACCES;
        }

        size = sizeof(cid) + sizeof(addr);
        if (size > ar->bmi.max_cmd_size) {
                WARN_ON(1);
                return -EINVAL;
        }
        memset(ar->bmi.cmd_buf, 0, size);

        ath6kl_dbg(ATH6KL_DBG_BMI,
                   "bmi LZ stream start: addr: 0x%x)\n",
                   addr);

        offset = 0;
        memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
        offset += sizeof(cid);
        memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
        offset += sizeof(addr);

        ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
        if (ret) {
                ath6kl_err("Unable to start LZ stream to the device: %d\n",
                           ret);
                return ret;
        }

        return 0;
}

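/*
 * Typical use of the download helpers (hypothetical caller, illustrative
 * only -- the real sequencing lives in the ath6kl init code, not in this
 * file):
 *
 *	ret = ath6kl_bmi_get_target_info(ar, &targ_info);
 *	if (ret)
 *		return ret;
 *	ret = ath6kl_bmi_fast_download(ar, fw_load_addr, fw_data, fw_len);
 *	if (ret)
 *		return ret;
 *	ret = ath6kl_bmi_done(ar);
 *
 * where fw_load_addr, fw_data and fw_len stand in for whatever load
 * address and image the caller obtained elsewhere.
 */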
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
        int ret;
        u32 last_word = 0;
        u32 last_word_offset = len & ~0x3;
        u32 unaligned_bytes = len & 0x3;

        ret = ath6kl_bmi_lz_stream_start(ar, addr);
        if (ret)
                return ret;

        if (unaligned_bytes) {
                /* copy the last word into a zero padded buffer */
                memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
        }

        ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
        if (ret)
                return ret;

        if (unaligned_bytes)
                ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);

        if (!ret) {
                /* Close compressed stream and open a new (fake) one.
                 * This serves mainly to flush Target caches. */
                ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
        }
        return ret;
}

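/*
 * Housekeeping: ath6kl_bmi_init() sizes and allocates the shared command
 * buffer (command id + address + length headers plus max_data_size worth
 * of payload), ath6kl_bmi_reset() clears done_sent so BMI commands are
 * accepted again, and ath6kl_bmi_cleanup() frees the buffer.
 */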
void ath6kl_bmi_reset(struct ath6kl *ar)
{
        ar->bmi.done_sent = false;
}

int ath6kl_bmi_init(struct ath6kl *ar)
{
        if (WARN_ON(ar->bmi.max_data_size == 0))
                return -EINVAL;

        /* cmd + addr + len + data_size */
        ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);

        ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
        if (!ar->bmi.cmd_buf)
                return -ENOMEM;

        return 0;
}

void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
        kfree(ar->bmi.cmd_buf);
        ar->bmi.cmd_buf = NULL;
}