/* drivers/net/wireless/ath/ath6kl/bmi.c */

/*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
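
/*
 * BMI (Bootloader Messaging Interface) is the command channel used to talk
 * to the target's boot loader before the firmware is running.  Most helpers
 * below marshal a 32-bit command id followed by its arguments into
 * ar->bmi.cmd_buf and hand it to the HIF layer via ath6kl_hif_bmi_write()
 * and ath6kl_hif_bmi_read().  A typical boot sequence queries the target
 * info, downloads firmware with the memory-write or LZ-stream commands,
 * sets the application start address and finally sends BMI_DONE, after
 * which no further BMI commands are issued.
 */

/*
 * ath6kl_bmi_done - send BMI_DONE to end the BMI phase.  The command is
 * sent at most once: once ar->bmi.done_sent is set, a repeat call is a
 * silent no-op and every other BMI helper returns -EACCES.
 */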
int ath6kl_bmi_done(struct ath6kl *ar)
{
	int ret;
	u32 cid = BMI_DONE;

	if (ar->bmi.done_sent) {
		ath6kl_dbg(ATH6KL_DBG_BMI, "bmi done skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;

	ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send bmi done: %d\n", ret);
		return ret;
	}

	return 0;
}
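
/*
 * ath6kl_bmi_get_target_info - issue BMI_GET_TARGET_INFO and fill in
 * @targ_info.  Newer targets first return a version sentinel followed by
 * a byte count and the remaining fields; the byte count must match the
 * host's struct ath6kl_bmi_target_info or the call fails with -EINVAL.
 */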
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
			       struct ath6kl_bmi_target_info *targ_info)
{
	int ret;
	u32 cid = BMI_GET_TARGET_INFO;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
	if (ret) {
		ath6kl_err("Unable to send get target info: %d\n", ret);
		return ret;
	}

	ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
				  sizeof(targ_info->version));
	if (ret) {
		ath6kl_err("Unable to recv target info: %d\n", ret);
		return ret;
	}

	if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
		/* Determine how many bytes are in the Target's targ_info */
		ret = ath6kl_hif_bmi_read(ar,
					  (u8 *)&targ_info->byte_count,
					  sizeof(targ_info->byte_count));
		if (ret) {
			ath6kl_err("unable to read target info byte count: %d\n",
				   ret);
			return ret;
		}

		/*
		 * The target's targ_info doesn't match the host's targ_info.
		 * We need to do some backwards compatibility to make this work.
		 */
		if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
			WARN_ON(1);
			return -EINVAL;
		}

		/* Read the remainder of the targ_info */
		ret = ath6kl_hif_bmi_read(ar,
					  ((u8 *)targ_info) +
					  sizeof(targ_info->byte_count),
					  sizeof(*targ_info) -
					  sizeof(targ_info->byte_count));

		if (ret) {
			ath6kl_err("Unable to read target info (%d bytes): %d\n",
				   targ_info->byte_count, ret);
			return ret;
		}
	}

	ath6kl_dbg(ATH6KL_DBG_BMI, "target info (ver: 0x%x type: 0x%x)\n",
		   targ_info->version, targ_info->type);

	return 0;
}
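
/*
 * ath6kl_bmi_read - read @len bytes of target memory at @addr into @buf.
 * The transfer is split into chunks of at most ar->bmi.max_data_size
 * bytes; each chunk sends a BMI_READ_MEMORY header (cid, addr, length)
 * and copies the reply out of the shared command buffer.
 */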
int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_READ_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, rx_len;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi read memory: device: addr: 0x%x, len: %d\n",
		   addr, len);

	len_remain = len;

	while (len_remain) {
		rx_len = (len_remain < ar->bmi.max_data_size) ?
			 len_remain : ar->bmi.max_data_size;
		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
		offset += sizeof(len);

		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
		if (ret) {
			ath6kl_err("Unable to read from the device: %d\n",
				   ret);
			return ret;
		}
		memcpy(&buf[len - len_remain], ar->bmi.cmd_buf, rx_len);
		len_remain -= rx_len;
		addr += rx_len;
	}

	return 0;
}
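
/*
 * ath6kl_bmi_write - write @len bytes from @buf to target memory at @addr
 * using BMI_WRITE_MEMORY.  Data is sent in chunks that fit the command
 * buffer; a short trailing chunk is rounded up to a 4-byte multiple via a
 * local bounce buffer before it is sent.
 */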
int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	u32 cid = BMI_WRITE_MEMORY;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
	u8 aligned_buf[400];
	u8 *src;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}

	if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
		return -E2BIG;

	memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write memory: addr: 0x%x, len: %d\n", addr, len);

	len_remain = len;
	while (len_remain) {
		src = &buf[len - len_remain];

		if (len_remain < (ar->bmi.max_data_size - header)) {
			if (len_remain & 3) {
				/* align it with 4 bytes */
				len_remain = len_remain +
					     (4 - (len_remain & 3));
				memcpy(aligned_buf, src, len_remain);
				src = aligned_buf;
			}
			tx_len = len_remain;
		} else {
			tx_len = (ar->bmi.max_data_size - header);
		}

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
		offset += sizeof(addr);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
		offset += tx_len;

		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}
		len_remain -= tx_len;
		addr += tx_len;
	}

	return 0;
}
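
/*
 * ath6kl_bmi_execute - start execution at target address @addr with
 * BMI_EXECUTE.  *@param is passed to the target as the argument and is
 * overwritten with the value the target returns.
 */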
int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_EXECUTE;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(*param);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi execute: addr: 0x%x, param: %d\n",
		   addr, *param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
	offset += sizeof(*param);

	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}

	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}
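
/*
 * ath6kl_bmi_set_app_start - record @addr as the firmware entry point
 * (BMI_SET_APP_START); the target starts the application from this
 * address once the BMI phase is finished.
 */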
int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_SET_APP_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi set app start: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
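
/*
 * ath6kl_bmi_reg_read - read the 32-bit SoC register at @addr with
 * BMI_READ_SOC_REGISTER and return its value in *@param.
 */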
int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
{
	u32 cid = BMI_READ_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi read SOC reg: addr: 0x%x\n", addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
	if (ret) {
		ath6kl_err("Unable to read from the device: %d\n", ret);
		return ret;
	}
	memcpy(param, ar->bmi.cmd_buf, sizeof(*param));

	return 0;
}
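
/*
 * ath6kl_bmi_reg_write - write @param to the 32-bit SoC register at @addr
 * with BMI_WRITE_SOC_REGISTER.
 */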
int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
{
	u32 cid = BMI_WRITE_SOC_REGISTER;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr) + sizeof(param);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi write SOC reg: addr: 0x%x, param: %d\n",
		   addr, param);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);
	memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
	offset += sizeof(param);

	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
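
/*
 * ath6kl_bmi_lz_data - feed @len bytes of LZ-compressed image data to the
 * target with BMI_LZ_DATA, chunked to fit the command buffer.  A stream
 * must have been opened with ath6kl_bmi_lz_stream_start() first.
 */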
int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
{
	u32 cid = BMI_LZ_DATA;
	int ret;
	u32 offset;
	u32 len_remain, tx_len;
	const u32 header = sizeof(cid) + sizeof(len);
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = ar->bmi.max_data_size + header;
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI, "bmi send LZ data: len: %d\n",
		   len);

	len_remain = len;
	while (len_remain) {
		tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
			 len_remain : (ar->bmi.max_data_size - header);

		offset = 0;
		memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
		offset += sizeof(cid);
		memcpy(&(ar->bmi.cmd_buf[offset]), &tx_len, sizeof(tx_len));
		offset += sizeof(tx_len);
		memcpy(&(ar->bmi.cmd_buf[offset]), &buf[len - len_remain],
		       tx_len);
		offset += tx_len;

		ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
		if (ret) {
			ath6kl_err("Unable to write to the device: %d\n",
				   ret);
			return ret;
		}

		len_remain -= tx_len;
	}

	return 0;
}
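
/*
 * ath6kl_bmi_lz_stream_start - open an LZ stream with BMI_LZ_STREAM_START.
 * @addr is the destination address for the decompressed data; starting a
 * new stream also terminates any stream that is currently open.
 */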
int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
{
	u32 cid = BMI_LZ_STREAM_START;
	int ret;
	u32 offset;
	u16 size;

	if (ar->bmi.done_sent) {
		ath6kl_err("bmi done sent already, cmd %d disallowed\n", cid);
		return -EACCES;
	}

	size = sizeof(cid) + sizeof(addr);
	if (size > ar->bmi.max_cmd_size) {
		WARN_ON(1);
		return -EINVAL;
	}
	memset(ar->bmi.cmd_buf, 0, size);

	ath6kl_dbg(ATH6KL_DBG_BMI,
		   "bmi LZ stream start: addr: 0x%x\n",
		   addr);

	offset = 0;
	memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
	offset += sizeof(cid);
	memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
	offset += sizeof(addr);

	ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
	if (ret) {
		ath6kl_err("Unable to start LZ stream to the device: %d\n",
			   ret);
		return ret;
	}

	return 0;
}
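
/*
 * ath6kl_bmi_fast_download - download an LZ-compressed image to @addr.
 * The 4-byte-aligned bulk of @buf is streamed first; any trailing 1-3
 * bytes are zero padded into a full word and sent separately.  A dummy
 * stream start at address 0 then flushes the target caches.
 */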
int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
{
	int ret;
	u32 last_word = 0;
	u32 last_word_offset = len & ~0x3;
	u32 unaligned_bytes = len & 0x3;

	ret = ath6kl_bmi_lz_stream_start(ar, addr);
	if (ret)
		return ret;

	if (unaligned_bytes) {
		/* copy the last word into a zero padded buffer */
		memcpy(&last_word, &buf[last_word_offset], unaligned_bytes);
	}

	ret = ath6kl_bmi_lz_data(ar, buf, last_word_offset);
	if (ret)
		return ret;

	if (unaligned_bytes)
		ret = ath6kl_bmi_lz_data(ar, (u8 *)&last_word, 4);

	if (!ret) {
		/* Close compressed stream and open a new (fake) one.
		 * This serves mainly to flush Target caches. */
		ret = ath6kl_bmi_lz_stream_start(ar, 0x00);
	}

	return ret;
}
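
/*
 * ath6kl_bmi_reset - clear the done flag so BMI commands are accepted
 * again, e.g. after the target has been reset.
 */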
void ath6kl_bmi_reset(struct ath6kl *ar)
{
	ar->bmi.done_sent = false;
}
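
/*
 * ath6kl_bmi_init - size and allocate the shared BMI command buffer:
 * room for the largest data payload plus a command id, address and
 * length word.  ath6kl_bmi_cleanup() frees it again.
 */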
int ath6kl_bmi_init(struct ath6kl *ar)
{
	if (WARN_ON(ar->bmi.max_data_size == 0))
		return -EINVAL;

	/* cmd + addr + len + data_size */
	ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);

	ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
	if (!ar->bmi.cmd_buf)
		return -ENOMEM;

	return 0;
}

void ath6kl_bmi_cleanup(struct ath6kl *ar)
{
	kfree(ar->bmi.cmd_buf);
	ar->bmi.cmd_buf = NULL;
}