// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
24 void ath10k_bmi_start(struct ath10k
*ar
)
28 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi start\n");
30 ar
->bmi
.done_sent
= false;
32 /* Enable hardware clock to speed up firmware download */
33 if (ar
->hw_params
.hw_ops
->enable_pll_clk
) {
34 ret
= ar
->hw_params
.hw_ops
->enable_pll_clk(ar
);
35 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi enable pll ret %d\n", ret
);
39 int ath10k_bmi_done(struct ath10k
*ar
)
42 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.done
);
45 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi done\n");
47 if (ar
->bmi
.done_sent
) {
48 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi skipped\n");
52 ar
->bmi
.done_sent
= true;
53 cmd
.id
= __cpu_to_le32(BMI_DONE
);
55 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, NULL
, NULL
);
57 ath10k_warn(ar
, "unable to write to the device: %d\n", ret
);
64 int ath10k_bmi_get_target_info(struct ath10k
*ar
,
65 struct bmi_target_info
*target_info
)
69 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.get_target_info
);
70 u32 resplen
= sizeof(resp
.get_target_info
);
73 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi get target info\n");
75 if (ar
->bmi
.done_sent
) {
76 ath10k_warn(ar
, "BMI Get Target Info Command disallowed\n");
80 cmd
.id
= __cpu_to_le32(BMI_GET_TARGET_INFO
);
82 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, &resp
, &resplen
);
84 ath10k_warn(ar
, "unable to get target info from device\n");
88 if (resplen
< sizeof(resp
.get_target_info
)) {
89 ath10k_warn(ar
, "invalid get_target_info response length (%d)\n",
94 target_info
->version
= __le32_to_cpu(resp
.get_target_info
.version
);
95 target_info
->type
= __le32_to_cpu(resp
.get_target_info
.type
);
100 #define TARGET_VERSION_SENTINAL 0xffffffffu
102 int ath10k_bmi_get_target_info_sdio(struct ath10k
*ar
,
103 struct bmi_target_info
*target_info
)
107 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.get_target_info
);
108 u32 resplen
, ver_len
;
112 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi get target info SDIO\n");
114 if (ar
->bmi
.done_sent
) {
115 ath10k_warn(ar
, "BMI Get Target Info Command disallowed\n");
119 cmd
.id
= __cpu_to_le32(BMI_GET_TARGET_INFO
);
121 /* Step 1: Read 4 bytes of the target info and check if it is
122 * the special sentinal version word or the first word in the
125 resplen
= sizeof(u32
);
126 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, &tmp
, &resplen
);
128 ath10k_warn(ar
, "unable to read from device\n");
132 /* Some SDIO boards have a special sentinal byte before the real
135 if (__le32_to_cpu(tmp
) == TARGET_VERSION_SENTINAL
) {
136 /* Step 1b: Read the version length */
137 resplen
= sizeof(u32
);
138 ret
= ath10k_hif_exchange_bmi_msg(ar
, NULL
, 0, &tmp
,
141 ath10k_warn(ar
, "unable to read from device\n");
146 ver_len
= __le32_to_cpu(tmp
);
148 /* Step 2: Check the target info length */
149 if (ver_len
!= sizeof(resp
.get_target_info
)) {
150 ath10k_warn(ar
, "Unexpected target info len: %u. Expected: %zu\n",
151 ver_len
, sizeof(resp
.get_target_info
));
155 /* Step 3: Read the rest of the version response */
156 resplen
= sizeof(resp
.get_target_info
) - sizeof(u32
);
157 ret
= ath10k_hif_exchange_bmi_msg(ar
, NULL
, 0,
158 &resp
.get_target_info
.version
,
161 ath10k_warn(ar
, "unable to read from device\n");
165 target_info
->version
= __le32_to_cpu(resp
.get_target_info
.version
);
166 target_info
->type
= __le32_to_cpu(resp
.get_target_info
.type
);
171 int ath10k_bmi_read_memory(struct ath10k
*ar
,
172 u32 address
, void *buffer
, u32 length
)
176 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.read_mem
);
180 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi read address 0x%x length %d\n",
183 if (ar
->bmi
.done_sent
) {
184 ath10k_warn(ar
, "command disallowed\n");
189 rxlen
= min_t(u32
, length
, BMI_MAX_DATA_SIZE
);
191 cmd
.id
= __cpu_to_le32(BMI_READ_MEMORY
);
192 cmd
.read_mem
.addr
= __cpu_to_le32(address
);
193 cmd
.read_mem
.len
= __cpu_to_le32(rxlen
);
195 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
,
198 ath10k_warn(ar
, "unable to read from the device (%d)\n",
203 memcpy(buffer
, resp
.read_mem
.payload
, rxlen
);
212 int ath10k_bmi_write_soc_reg(struct ath10k
*ar
, u32 address
, u32 reg_val
)
215 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.write_soc_reg
);
218 ath10k_dbg(ar
, ATH10K_DBG_BMI
,
219 "bmi write soc register 0x%08x val 0x%08x\n",
222 if (ar
->bmi
.done_sent
) {
223 ath10k_warn(ar
, "bmi write soc register command in progress\n");
227 cmd
.id
= __cpu_to_le32(BMI_WRITE_SOC_REGISTER
);
228 cmd
.write_soc_reg
.addr
= __cpu_to_le32(address
);
229 cmd
.write_soc_reg
.value
= __cpu_to_le32(reg_val
);
231 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, NULL
, NULL
);
233 ath10k_warn(ar
, "Unable to write soc register to device: %d\n",
241 int ath10k_bmi_read_soc_reg(struct ath10k
*ar
, u32 address
, u32
*reg_val
)
245 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.read_soc_reg
);
246 u32 resplen
= sizeof(resp
.read_soc_reg
);
249 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi read soc register 0x%08x\n",
252 if (ar
->bmi
.done_sent
) {
253 ath10k_warn(ar
, "bmi read soc register command in progress\n");
257 cmd
.id
= __cpu_to_le32(BMI_READ_SOC_REGISTER
);
258 cmd
.read_soc_reg
.addr
= __cpu_to_le32(address
);
260 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, &resp
, &resplen
);
262 ath10k_warn(ar
, "Unable to read soc register from device: %d\n",
267 *reg_val
= __le32_to_cpu(resp
.read_soc_reg
.value
);
269 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi read soc register value 0x%08x\n",
275 int ath10k_bmi_write_memory(struct ath10k
*ar
,
276 u32 address
, const void *buffer
, u32 length
)
279 u32 hdrlen
= sizeof(cmd
.id
) + sizeof(cmd
.write_mem
);
283 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi write address 0x%x length %d\n",
286 if (ar
->bmi
.done_sent
) {
287 ath10k_warn(ar
, "command disallowed\n");
292 txlen
= min(length
, BMI_MAX_DATA_SIZE
- hdrlen
);
294 /* copy before roundup to avoid reading beyond buffer*/
295 memcpy(cmd
.write_mem
.payload
, buffer
, txlen
);
296 txlen
= roundup(txlen
, 4);
298 cmd
.id
= __cpu_to_le32(BMI_WRITE_MEMORY
);
299 cmd
.write_mem
.addr
= __cpu_to_le32(address
);
300 cmd
.write_mem
.len
= __cpu_to_le32(txlen
);
302 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, hdrlen
+ txlen
,
305 ath10k_warn(ar
, "unable to write to the device (%d)\n",
310 /* fixup roundup() so `length` zeroes out for last chunk */
311 txlen
= min(txlen
, length
);
321 int ath10k_bmi_execute(struct ath10k
*ar
, u32 address
, u32 param
, u32
*result
)
325 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.execute
);
326 u32 resplen
= sizeof(resp
.execute
);
329 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi execute address 0x%x param 0x%x\n",
332 if (ar
->bmi
.done_sent
) {
333 ath10k_warn(ar
, "command disallowed\n");
337 cmd
.id
= __cpu_to_le32(BMI_EXECUTE
);
338 cmd
.execute
.addr
= __cpu_to_le32(address
);
339 cmd
.execute
.param
= __cpu_to_le32(param
);
341 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, &resp
, &resplen
);
343 ath10k_warn(ar
, "unable to read from the device\n");
347 if (resplen
< sizeof(resp
.execute
)) {
348 ath10k_warn(ar
, "invalid execute response length (%d)\n",
353 *result
= __le32_to_cpu(resp
.execute
.result
);
355 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi execute result 0x%x\n", *result
);
360 int ath10k_bmi_lz_data(struct ath10k
*ar
, const void *buffer
, u32 length
)
363 u32 hdrlen
= sizeof(cmd
.id
) + sizeof(cmd
.lz_data
);
367 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi lz data buffer 0x%pK length %d\n",
370 if (ar
->bmi
.done_sent
) {
371 ath10k_warn(ar
, "command disallowed\n");
376 txlen
= min(length
, BMI_MAX_DATA_SIZE
- hdrlen
);
378 WARN_ON_ONCE(txlen
& 3);
380 cmd
.id
= __cpu_to_le32(BMI_LZ_DATA
);
381 cmd
.lz_data
.len
= __cpu_to_le32(txlen
);
382 memcpy(cmd
.lz_data
.payload
, buffer
, txlen
);
384 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, hdrlen
+ txlen
,
387 ath10k_warn(ar
, "unable to write to the device\n");
398 int ath10k_bmi_lz_stream_start(struct ath10k
*ar
, u32 address
)
401 u32 cmdlen
= sizeof(cmd
.id
) + sizeof(cmd
.lz_start
);
404 ath10k_dbg(ar
, ATH10K_DBG_BMI
, "bmi lz stream start address 0x%x\n",
407 if (ar
->bmi
.done_sent
) {
408 ath10k_warn(ar
, "command disallowed\n");
412 cmd
.id
= __cpu_to_le32(BMI_LZ_STREAM_START
);
413 cmd
.lz_start
.addr
= __cpu_to_le32(address
);
415 ret
= ath10k_hif_exchange_bmi_msg(ar
, &cmd
, cmdlen
, NULL
, NULL
);
417 ath10k_warn(ar
, "unable to Start LZ Stream to the device\n");
424 int ath10k_bmi_fast_download(struct ath10k
*ar
,
425 u32 address
, const void *buffer
, u32 length
)
428 u32 head_len
= rounddown(length
, 4);
429 u32 trailer_len
= length
- head_len
;
432 ath10k_dbg(ar
, ATH10K_DBG_BMI
,
433 "bmi fast download address 0x%x buffer 0x%pK length %d\n",
434 address
, buffer
, length
);
436 ret
= ath10k_bmi_lz_stream_start(ar
, address
);
440 /* copy the last word into a zero padded buffer */
442 memcpy(trailer
, buffer
+ head_len
, trailer_len
);
444 ret
= ath10k_bmi_lz_data(ar
, buffer
, head_len
);
449 ret
= ath10k_bmi_lz_data(ar
, trailer
, 4);
455 * Close compressed stream and open a new (fake) one.
456 * This serves mainly to flush Target caches.
458 ret
= ath10k_bmi_lz_stream_start(ar
, 0x00);