iwlwifi: use the DMA state API instead of the pci equivalents
[linux-2.6/next.git] / drivers / net / wireless / iwlwifi / iwl-agn-ucode.c
blob: 6f77441cb65a3f734260e23b1a38a08574d4a668
1 /******************************************************************************
3 * GPL LICENSE SUMMARY
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *****************************************************************************/
30 #include <linux/kernel.h>
31 #include <linux/module.h>
32 #include <linux/init.h>
33 #include <linux/sched.h>
35 #include "iwl-dev.h"
36 #include "iwl-core.h"
37 #include "iwl-io.h"
38 #include "iwl-helpers.h"
39 #include "iwl-agn-hw.h"
40 #include "iwl-agn.h"
42 static const s8 iwlagn_default_queue_to_tx_fifo[] = {
43 IWL_TX_FIFO_VO,
44 IWL_TX_FIFO_VI,
45 IWL_TX_FIFO_BE,
46 IWL_TX_FIFO_BK,
47 IWLAGN_CMD_FIFO_NUM,
48 IWL_TX_FIFO_UNUSED,
49 IWL_TX_FIFO_UNUSED,
50 IWL_TX_FIFO_UNUSED,
51 IWL_TX_FIFO_UNUSED,
52 IWL_TX_FIFO_UNUSED,
55 static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
56 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
57 0, COEX_UNASSOC_IDLE_FLAGS},
58 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
59 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
60 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
61 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
62 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
63 0, COEX_CALIBRATION_FLAGS},
64 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
65 0, COEX_PERIODIC_CALIBRATION_FLAGS},
66 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
67 0, COEX_CONNECTION_ESTAB_FLAGS},
68 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
69 0, COEX_ASSOCIATED_IDLE_FLAGS},
70 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
71 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
72 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
73 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
74 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
75 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
76 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
77 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
78 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
79 0, COEX_STAND_ALONE_DEBUG_FLAGS},
80 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
81 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
82 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
83 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
87 * ucode
89 static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
90 struct fw_desc *image, u32 dst_addr)
92 dma_addr_t phy_addr = image->p_addr;
93 u32 byte_cnt = image->len;
94 int ret;
96 priv->ucode_write_complete = 0;
98 iwl_write_direct32(priv,
99 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
100 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
102 iwl_write_direct32(priv,
103 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
105 iwl_write_direct32(priv,
106 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
107 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
109 iwl_write_direct32(priv,
110 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
111 (iwl_get_dma_hi_addr(phy_addr)
112 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
114 iwl_write_direct32(priv,
115 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
116 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
117 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
118 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
120 iwl_write_direct32(priv,
121 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
122 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
123 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
124 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
126 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
127 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
128 priv->ucode_write_complete, 5 * HZ);
129 if (ret == -ERESTARTSYS) {
130 IWL_ERR(priv, "Could not load the %s uCode section due "
131 "to interrupt\n", name);
132 return ret;
134 if (!ret) {
135 IWL_ERR(priv, "Could not load the %s uCode section\n",
136 name);
137 return -ETIMEDOUT;
140 return 0;
143 static int iwlagn_load_given_ucode(struct iwl_priv *priv,
144 struct fw_desc *inst_image,
145 struct fw_desc *data_image)
147 int ret = 0;
149 ret = iwlagn_load_section(priv, "INST", inst_image,
150 IWLAGN_RTC_INST_LOWER_BOUND);
151 if (ret)
152 return ret;
154 return iwlagn_load_section(priv, "DATA", data_image,
155 IWLAGN_RTC_DATA_LOWER_BOUND);
158 int iwlagn_load_ucode(struct iwl_priv *priv)
160 int ret = 0;
162 /* check whether init ucode should be loaded, or rather runtime ucode */
163 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
164 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
165 ret = iwlagn_load_given_ucode(priv,
166 &priv->ucode_init, &priv->ucode_init_data);
167 if (!ret) {
168 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
169 priv->ucode_type = UCODE_INIT;
171 } else {
172 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
173 "Loading runtime ucode...\n");
174 ret = iwlagn_load_given_ucode(priv,
175 &priv->ucode_code, &priv->ucode_data);
176 if (!ret) {
177 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
178 priv->ucode_type = UCODE_RT;
182 return ret;
186 * Calibration
188 static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
190 struct iwl_calib_xtal_freq_cmd cmd;
191 __le16 *xtal_calib =
192 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
194 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
195 cmd.hdr.first_group = 0;
196 cmd.hdr.groups_num = 1;
197 cmd.hdr.data_valid = 1;
198 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
199 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
200 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
201 (u8 *)&cmd, sizeof(cmd));
204 static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
206 struct iwl_calib_cfg_cmd calib_cfg_cmd;
207 struct iwl_host_cmd cmd = {
208 .id = CALIBRATION_CFG_CMD,
209 .len = sizeof(struct iwl_calib_cfg_cmd),
210 .data = &calib_cfg_cmd,
213 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
214 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
215 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
216 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
217 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
219 return iwl_send_cmd(priv, &cmd);
222 void iwlagn_rx_calib_result(struct iwl_priv *priv,
223 struct iwl_rx_mem_buffer *rxb)
225 struct iwl_rx_packet *pkt = rxb_addr(rxb);
226 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
227 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
228 int index;
230 /* reduce the size of the length field itself */
231 len -= 4;
233 /* Define the order in which the results will be sent to the runtime
234 * uCode. iwl_send_calib_results sends them in a row according to
235 * their index. We sort them here
237 switch (hdr->op_code) {
238 case IWL_PHY_CALIBRATE_DC_CMD:
239 index = IWL_CALIB_DC;
240 break;
241 case IWL_PHY_CALIBRATE_LO_CMD:
242 index = IWL_CALIB_LO;
243 break;
244 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
245 index = IWL_CALIB_TX_IQ;
246 break;
247 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
248 index = IWL_CALIB_TX_IQ_PERD;
249 break;
250 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
251 index = IWL_CALIB_BASE_BAND;
252 break;
253 default:
254 IWL_ERR(priv, "Unknown calibration notification %d\n",
255 hdr->op_code);
256 return;
258 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
261 void iwlagn_rx_calib_complete(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb)
264 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
265 queue_work(priv->workqueue, &priv->restart);
268 void iwlagn_init_alive_start(struct iwl_priv *priv)
270 int ret = 0;
272 /* Check alive response for "valid" sign from uCode */
273 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
274 /* We had an error bringing up the hardware, so take it
275 * all the way back down so we can try again */
276 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
277 goto restart;
280 /* initialize uCode was loaded... verify inst image.
281 * This is a paranoid check, because we would not have gotten the
282 * "initialize" alive if code weren't properly loaded. */
283 if (iwl_verify_ucode(priv)) {
284 /* Runtime instruction load was bad;
285 * take it all the way back down so we can try again */
286 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
287 goto restart;
290 ret = priv->cfg->ops->lib->alive_notify(priv);
291 if (ret) {
292 IWL_WARN(priv,
293 "Could not complete ALIVE transition: %d\n", ret);
294 goto restart;
297 iwlagn_send_calib_cfg(priv);
298 return;
300 restart:
301 /* real restart (first load init_ucode) */
302 queue_work(priv->workqueue, &priv->restart);
305 static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
307 struct iwl_wimax_coex_cmd coex_cmd;
309 if (priv->cfg->support_wimax_coexist) {
310 /* UnMask wake up src at associated sleep */
311 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
313 /* UnMask wake up src at unassociated sleep */
314 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
315 memcpy(coex_cmd.sta_prio, cu_priorities,
316 sizeof(struct iwl_wimax_coex_event_entry) *
317 COEX_NUM_OF_EVENTS);
319 /* enabling the coexistence feature */
320 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
322 /* enabling the priorities tables */
323 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
324 } else {
325 /* coexistence is disabled */
326 memset(&coex_cmd, 0, sizeof(coex_cmd));
328 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
329 sizeof(coex_cmd), &coex_cmd);
332 int iwlagn_alive_notify(struct iwl_priv *priv)
334 u32 a;
335 unsigned long flags;
336 int i, chan;
337 u32 reg_val;
339 spin_lock_irqsave(&priv->lock, flags);
341 priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
342 a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
343 for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
344 a += 4)
345 iwl_write_targ_mem(priv, a, 0);
346 for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
347 a += 4)
348 iwl_write_targ_mem(priv, a, 0);
349 for (; a < priv->scd_base_addr +
350 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
351 iwl_write_targ_mem(priv, a, 0);
353 iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
354 priv->scd_bc_tbls.dma >> 10);
356 /* Enable DMA channel */
357 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
358 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
359 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
360 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
362 /* Update FH chicken bits */
363 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
364 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
365 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
367 iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
368 IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
369 iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
371 /* initiate the queues */
372 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
373 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
374 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
375 iwl_write_targ_mem(priv, priv->scd_base_addr +
376 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
377 iwl_write_targ_mem(priv, priv->scd_base_addr +
378 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
379 sizeof(u32),
380 ((SCD_WIN_SIZE <<
381 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
382 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
383 ((SCD_FRAME_LIMIT <<
384 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
385 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
388 iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
389 IWL_MASK(0, priv->hw_params.max_txq_num));
391 /* Activate all Tx DMA/FIFO channels */
392 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
394 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
396 /* make sure all queue are not stopped */
397 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
398 for (i = 0; i < 4; i++)
399 atomic_set(&priv->queue_stop_count[i], 0);
401 /* reset to 0 to enable all the queue first */
402 priv->txq_ctx_active_msk = 0;
403 /* map qos queues to fifos one-to-one */
404 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
406 for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
407 int ac = iwlagn_default_queue_to_tx_fifo[i];
409 iwl_txq_ctx_activate(priv, i);
411 if (ac == IWL_TX_FIFO_UNUSED)
412 continue;
414 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
417 spin_unlock_irqrestore(&priv->lock, flags);
419 iwlagn_send_wimax_coex(priv);
421 iwlagn_set_Xtal_calib(priv);
422 iwl_send_calib_results(priv);
424 return 0;
429 * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
430 * using sample data 100 bytes apart. If these sample points are good,
431 * it's a pretty good bet that everything between them is good, too.
433 static int iwlcore_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
435 u32 val;
436 int ret = 0;
437 u32 errcnt = 0;
438 u32 i;
440 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
442 for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
443 /* read data comes through single port, auto-incr addr */
444 /* NOTE: Use the debugless read so we don't flood kernel log
445 * if IWL_DL_IO is set */
446 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
447 i + IWLAGN_RTC_INST_LOWER_BOUND);
448 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
449 if (val != le32_to_cpu(*image)) {
450 ret = -EIO;
451 errcnt++;
452 if (errcnt >= 3)
453 break;
457 return ret;
461 * iwlcore_verify_inst_full - verify runtime uCode image in card vs. host,
462 * looking at all data.
464 static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 *image,
465 u32 len)
467 u32 val;
468 u32 save_len = len;
469 int ret = 0;
470 u32 errcnt;
472 IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
474 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
475 IWLAGN_RTC_INST_LOWER_BOUND);
477 errcnt = 0;
478 for (; len > 0; len -= sizeof(u32), image++) {
479 /* read data comes through single port, auto-incr addr */
480 /* NOTE: Use the debugless read so we don't flood kernel log
481 * if IWL_DL_IO is set */
482 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
483 if (val != le32_to_cpu(*image)) {
484 IWL_ERR(priv, "uCode INST section is invalid at "
485 "offset 0x%x, is 0x%x, s/b 0x%x\n",
486 save_len - len, val, le32_to_cpu(*image));
487 ret = -EIO;
488 errcnt++;
489 if (errcnt >= 20)
490 break;
494 if (!errcnt)
495 IWL_DEBUG_INFO(priv,
496 "ucode image in INSTRUCTION memory is good\n");
498 return ret;
502 * iwl_verify_ucode - determine which instruction image is in SRAM,
503 * and verify its contents
505 int iwl_verify_ucode(struct iwl_priv *priv)
507 __le32 *image;
508 u32 len;
509 int ret;
511 /* Try bootstrap */
512 image = (__le32 *)priv->ucode_boot.v_addr;
513 len = priv->ucode_boot.len;
514 ret = iwlcore_verify_inst_sparse(priv, image, len);
515 if (!ret) {
516 IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
517 return 0;
520 /* Try initialize */
521 image = (__le32 *)priv->ucode_init.v_addr;
522 len = priv->ucode_init.len;
523 ret = iwlcore_verify_inst_sparse(priv, image, len);
524 if (!ret) {
525 IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
526 return 0;
529 /* Try runtime/protocol */
530 image = (__le32 *)priv->ucode_code.v_addr;
531 len = priv->ucode_code.len;
532 ret = iwlcore_verify_inst_sparse(priv, image, len);
533 if (!ret) {
534 IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
535 return 0;
538 IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
540 /* Since nothing seems to match, show first several data entries in
541 * instruction SRAM, so maybe visual inspection will give a clue.
542 * Selection of bootstrap image (vs. other images) is arbitrary. */
543 image = (__le32 *)priv->ucode_boot.v_addr;
544 len = priv->ucode_boot.len;
545 ret = iwl_verify_inst_full(priv, image, len);
547 return ret;