1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
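/*
 * Table mapping mailbox command opcodes to short, human-readable names;
 * mb_to_str() below performs the lookup for trace/log messages.
 */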
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
23 static const char *mb_to_str(uint16_t cmd)
25 int i;
26 struct mb_cmd_name *e;
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 e = mb_str + i;
30 if (cmd == e->cmd)
31 return e->str;
33 return "unknown";
36 static struct rom_cmd {
37 uint16_t cmd;
38 } rom_cmds[] = {
39 { MBC_LOAD_RAM },
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP },
62 { MBC_GET_RNID_PARAMS },
65 static int is_rom_cmd(uint16_t cmd)
67 int i;
68 struct rom_cmd *wc;
70 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
71 wc = rom_cmds + i;
72 if (wc->cmd == cmd)
73 return 1;
76 return 0;
80 * qla2x00_mailbox_command
81 * Issue mailbox command and wait for completion.
83 * Input:
84 * ha = adapter block pointer.
85 * mcp = driver internal mbx struct pointer.
87 * Output:
88 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
90 * Returns:
91 * 0 : QLA_SUCCESS = cmd performed successfully
92 * 1 : QLA_FUNCTION_FAILED (error encountered)
93 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
95 * Context:
96 * Kernel context.
98 static int
99 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
101 int rval, i;
102 unsigned long flags = 0;
103 device_reg_t *reg;
104 uint8_t abort_active;
105 uint8_t io_lock_on;
106 uint16_t command = 0;
107 uint16_t *iptr;
108 uint16_t __iomem *optr;
109 uint32_t cnt;
110 uint32_t mboxes;
111 unsigned long wait_time;
112 struct qla_hw_data *ha = vha->hw;
113 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
114 u32 chip_reset;
117 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
119 if (ha->pdev->error_state > pci_channel_io_frozen) {
120 ql_log(ql_log_warn, vha, 0x1001,
121 "error_state is greater than pci_channel_io_frozen, "
122 "exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
126 if (vha->device_flags & DFLG_DEV_FAILED) {
127 ql_log(ql_log_warn, vha, 0x1002,
128 "Device in failed state, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 /* If PCI error, then avoid mbx processing. */
133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 ql_log(ql_log_warn, vha, 0xd04e,
136 "PCI error, exiting.\n");
137 return QLA_FUNCTION_TIMEOUT;
140 reg = ha->iobase;
141 io_lock_on = base_vha->flags.init_done;
143 rval = QLA_SUCCESS;
144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 chip_reset = ha->chip_reset;
147 if (ha->flags.pci_channel_io_perm_failure) {
148 ql_log(ql_log_warn, vha, 0x1003,
149 "Perm failure on EEH timeout MBX, exiting.\n");
150 return QLA_FUNCTION_TIMEOUT;
153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 /* Setting Link-Down error */
155 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 ql_log(ql_log_warn, vha, 0x1004,
157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 return QLA_FUNCTION_TIMEOUT;
161 /* check if ISP abort is active and return cmd with timeout */
162 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 !is_rom_cmd(mcp->mb[0])) {
166 ql_log(ql_log_info, vha, 0x1005,
167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 mcp->mb[0]);
169 return QLA_FUNCTION_TIMEOUT;
172 atomic_inc(&ha->num_pend_mbx_stage1);
174 * Wait for active mailbox commands to finish by waiting at most tov
175 * seconds. This is to serialize the actual issuing of mailbox cmds during
176 * non-ISP-abort time.
178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 /* Timeout occurred. Return error. */
180 ql_log(ql_log_warn, vha, 0xd035,
181 "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 mcp->mb[0]);
183 atomic_dec(&ha->num_pend_mbx_stage1);
184 return QLA_FUNCTION_TIMEOUT;
186 atomic_dec(&ha->num_pend_mbx_stage1);
187 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
188 rval = QLA_ABORTED;
189 goto premature_exit;
192 ha->flags.mbox_busy = 1;
193 /* Save mailbox command for debug */
194 ha->mcp = mcp;
196 ql_dbg(ql_dbg_mbx, vha, 0x1006,
197 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
199 spin_lock_irqsave(&ha->hardware_lock, flags);
201 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
202 rval = QLA_ABORTED;
203 ha->flags.mbox_busy = 0;
204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
205 goto premature_exit;
208 /* Load mailbox registers. */
209 if (IS_P3P_TYPE(ha))
210 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
211 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
212 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
213 else
214 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
216 iptr = mcp->mb;
217 command = mcp->mb[0];
218 mboxes = mcp->out_mb;
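/*
 * mcp->out_mb is a bitmask: bit N set means mb[N] must be written to the
 * corresponding outgoing hardware mailbox register in the loop below.
 */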
220 ql_dbg(ql_dbg_mbx, vha, 0x1111,
221 "Mailbox registers (OUT):\n");
222 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
223 if (IS_QLA2200(ha) && cnt == 8)
224 optr =
225 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
226 if (mboxes & BIT_0) {
227 ql_dbg(ql_dbg_mbx, vha, 0x1112,
228 "mbox[%d]<-0x%04x\n", cnt, *iptr);
229 WRT_REG_WORD(optr, *iptr);
232 mboxes >>= 1;
233 optr++;
234 iptr++;
237 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
238 "I/O Address = %p.\n", optr);
240 /* Issue set host interrupt command to send cmd out. */
241 ha->flags.mbox_int = 0;
242 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
244 /* Unlock mbx registers and wait for interrupt */
245 ql_dbg(ql_dbg_mbx, vha, 0x100f,
246 "Going to unlock irq & waiting for interrupts. "
247 "jiffies=%lx.\n", jiffies);
249 /* Wait for mbx cmd completion until timeout */
250 atomic_inc(&ha->num_pend_mbx_stage2);
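/*
 * Two completion paths: if interrupts can be relied upon (init done and no
 * ISP abort active, or a no-polling adapter type), wait on the mailbox
 * interrupt completion; otherwise fall back to polling the response queue
 * until mbox_int is set or the timeout expires.
 */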
251 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
252 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
254 if (IS_P3P_TYPE(ha)) {
255 if (RD_REG_DWORD(&reg->isp82.hint) &
256 HINT_MBX_INT_PENDING) {
257 spin_unlock_irqrestore(&ha->hardware_lock,
258 flags);
259 ha->flags.mbox_busy = 0;
260 atomic_dec(&ha->num_pend_mbx_stage2);
261 ql_dbg(ql_dbg_mbx, vha, 0x1010,
262 "Pending mailbox timeout, exiting.\n");
263 rval = QLA_FUNCTION_TIMEOUT;
264 goto premature_exit;
266 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
267 } else if (IS_FWI2_CAPABLE(ha))
268 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
269 else
270 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
271 spin_unlock_irqrestore(&ha->hardware_lock, flags);
273 wait_time = jiffies;
274 atomic_inc(&ha->num_pend_mbx_stage3);
275 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
276 mcp->tov * HZ)) {
277 ql_dbg(ql_dbg_mbx, vha, 0x117a,
278 "cmd=%x Timeout.\n", command);
279 spin_lock_irqsave(&ha->hardware_lock, flags);
280 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
283 } else if (ha->flags.purge_mbox ||
284 chip_reset != ha->chip_reset) {
285 ha->flags.mbox_busy = 0;
286 atomic_dec(&ha->num_pend_mbx_stage2);
287 atomic_dec(&ha->num_pend_mbx_stage3);
288 rval = QLA_ABORTED;
289 goto premature_exit;
291 atomic_dec(&ha->num_pend_mbx_stage3);
293 if (time_after(jiffies, wait_time + 5 * HZ))
294 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
295 command, jiffies_to_msecs(jiffies - wait_time));
296 } else {
297 ql_dbg(ql_dbg_mbx, vha, 0x1011,
298 "Cmd=%x Polling Mode.\n", command);
300 if (IS_P3P_TYPE(ha)) {
301 if (RD_REG_DWORD(&reg->isp82.hint) &
302 HINT_MBX_INT_PENDING) {
303 spin_unlock_irqrestore(&ha->hardware_lock,
304 flags);
305 ha->flags.mbox_busy = 0;
306 atomic_dec(&ha->num_pend_mbx_stage2);
307 ql_dbg(ql_dbg_mbx, vha, 0x1012,
308 "Pending mailbox timeout, exiting.\n");
309 rval = QLA_FUNCTION_TIMEOUT;
310 goto premature_exit;
312 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
313 } else if (IS_FWI2_CAPABLE(ha))
314 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
315 else
316 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
317 spin_unlock_irqrestore(&ha->hardware_lock, flags);
319 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
320 while (!ha->flags.mbox_int) {
321 if (ha->flags.purge_mbox ||
322 chip_reset != ha->chip_reset) {
323 ha->flags.mbox_busy = 0;
324 atomic_dec(&ha->num_pend_mbx_stage2);
325 rval = QLA_ABORTED;
326 goto premature_exit;
329 if (time_after(jiffies, wait_time))
330 break;
333 * Check if it's UNLOADING, because we cannot poll in
334 * this case; otherwise a NULL pointer dereference
335 * is triggered.
337 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
338 return QLA_FUNCTION_TIMEOUT;
340 /* Check for pending interrupts. */
341 qla2x00_poll(ha->rsp_q_map[0]);
343 if (!ha->flags.mbox_int &&
344 !(IS_QLA2200(ha) &&
345 command == MBC_LOAD_RISC_RAM_EXTENDED))
346 msleep(10);
347 } /* while */
348 ql_dbg(ql_dbg_mbx, vha, 0x1013,
349 "Waited %d sec.\n",
350 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
352 atomic_dec(&ha->num_pend_mbx_stage2);
354 /* Check whether we timed out */
355 if (ha->flags.mbox_int) {
356 uint16_t *iptr2;
358 ql_dbg(ql_dbg_mbx, vha, 0x1014,
359 "Cmd=%x completed.\n", command);
361 /* Got interrupt. Clear the flag. */
362 ha->flags.mbox_int = 0;
363 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
365 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
366 ha->flags.mbox_busy = 0;
367 /* Setting Link-Down error */
368 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
369 ha->mcp = NULL;
370 rval = QLA_FUNCTION_FAILED;
371 ql_log(ql_log_warn, vha, 0xd048,
372 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
373 goto premature_exit;
376 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
377 rval = QLA_FUNCTION_FAILED;
379 /* Load return mailbox registers. */
380 iptr2 = mcp->mb;
381 iptr = (uint16_t *)&ha->mailbox_out[0];
382 mboxes = mcp->in_mb;
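/*
 * mcp->in_mb is a bitmask of mailbox registers to copy back from the
 * firmware's response (ha->mailbox_out[]) into mcp->mb[].
 */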
384 ql_dbg(ql_dbg_mbx, vha, 0x1113,
385 "Mailbox registers (IN):\n");
386 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
387 if (mboxes & BIT_0) {
388 *iptr2 = *iptr;
389 ql_dbg(ql_dbg_mbx, vha, 0x1114,
390 "mbox[%d]->0x%04x\n", cnt, *iptr2);
393 mboxes >>= 1;
394 iptr2++;
395 iptr++;
397 } else {
399 uint16_t mb[8];
400 uint32_t ictrl, host_status, hccr;
401 uint16_t w;
403 if (IS_FWI2_CAPABLE(ha)) {
404 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
405 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
406 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
407 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
408 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
409 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
410 host_status = RD_REG_DWORD(&reg->isp24.host_status);
411 hccr = RD_REG_DWORD(&reg->isp24.hccr);
413 ql_log(ql_log_warn, vha, 0xd04c,
414 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
415 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
416 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
417 mb[7], host_status, hccr);
419 } else {
420 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
421 ictrl = RD_REG_WORD(&reg->isp.ictrl);
422 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
423 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
424 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
426 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
428 /* Capture FW dump only if the PCI device is active */
429 if (!pci_channel_offline(vha->hw->pdev)) {
430 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
431 if (w == 0xffff || ictrl == 0xffffffff ||
432 (chip_reset != ha->chip_reset)) {
433 /* This is a special case: if the driver is being
434 * unloaded and the PCI device has gone into a bad
435 * state due to a PCI error condition, then only the
436 * PCI ERR flag would be set.
437 * We do a premature exit for the above case.
439 ha->flags.mbox_busy = 0;
440 rval = QLA_FUNCTION_TIMEOUT;
441 goto premature_exit;
444 /* Attempt to capture a firmware dump for further
445 * analysis of the current firmware state. We do not
446 * need to do this if we are intentionally generating
447 * a dump.
449 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
450 ha->isp_ops->fw_dump(vha, 0);
451 rval = QLA_FUNCTION_TIMEOUT;
455 ha->flags.mbox_busy = 0;
457 /* Clean up */
458 ha->mcp = NULL;
460 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
461 ql_dbg(ql_dbg_mbx, vha, 0x101a,
462 "Checking for additional resp interrupt.\n");
464 /* polling mode for non isp_abort commands. */
465 qla2x00_poll(ha->rsp_q_map[0]);
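/*
 * On a mailbox timeout (unless the command intentionally generated a system
 * error), recover the ISP: schedule an abort through the DPC thread, or call
 * abort_isp() directly when already running in DPC context.
 */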
468 if (rval == QLA_FUNCTION_TIMEOUT &&
469 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
470 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
471 ha->flags.eeh_busy) {
472 /* Not in DPC context; schedule it for the DPC thread to take over. */
473 ql_dbg(ql_dbg_mbx, vha, 0x101b,
474 "Timeout, schedule isp_abort_needed.\n");
476 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
477 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
478 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
479 if (IS_QLA82XX(ha)) {
480 ql_dbg(ql_dbg_mbx, vha, 0x112a,
481 "disabling pause transmit on port "
482 "0 & 1.\n");
483 qla82xx_wr_32(ha,
484 QLA82XX_CRB_NIU + 0x98,
485 CRB_NIU_XG_PAUSE_CTL_P0|
486 CRB_NIU_XG_PAUSE_CTL_P1);
488 ql_log(ql_log_info, base_vha, 0x101c,
489 "Mailbox cmd timeout occurred, cmd=0x%x, "
490 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
491 "abort.\n", command, mcp->mb[0],
492 ha->flags.eeh_busy);
493 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
494 qla2xxx_wake_dpc(vha);
496 } else if (current == ha->dpc_thread) {
497 /* call abort directly since we are in the DPC thread */
498 ql_dbg(ql_dbg_mbx, vha, 0x101d,
499 "Timeout, calling abort_isp.\n");
501 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 if (IS_QLA82XX(ha)) {
505 ql_dbg(ql_dbg_mbx, vha, 0x112b,
506 "disabling pause transmit on port "
507 "0 & 1.\n");
508 qla82xx_wr_32(ha,
509 QLA82XX_CRB_NIU + 0x98,
510 CRB_NIU_XG_PAUSE_CTL_P0|
511 CRB_NIU_XG_PAUSE_CTL_P1);
513 ql_log(ql_log_info, base_vha, 0x101e,
514 "Mailbox cmd timeout occurred, cmd=0x%x, "
515 "mb[0]=0x%x. Scheduling ISP abort ",
516 command, mcp->mb[0]);
517 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
518 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
519 /* Allow next mbx cmd to come in. */
520 complete(&ha->mbx_cmd_comp);
521 if (ha->isp_ops->abort_isp(vha)) {
522 /* Failed. retry later. */
523 set_bit(ISP_ABORT_NEEDED,
524 &vha->dpc_flags);
526 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
527 ql_dbg(ql_dbg_mbx, vha, 0x101f,
528 "Finished abort_isp.\n");
529 goto mbx_done;
534 premature_exit:
535 /* Allow next mbx cmd to come in. */
536 complete(&ha->mbx_cmd_comp);
538 mbx_done:
539 if (rval == QLA_ABORTED) {
540 ql_log(ql_log_info, vha, 0xd035,
541 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
542 mcp->mb[0]);
543 } else if (rval) {
544 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
545 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
546 dev_name(&ha->pdev->dev), 0x1020+0x800,
547 vha->host_no);
548 mboxes = mcp->in_mb;
549 cnt = 4;
550 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
551 if (mboxes & BIT_0) {
552 printk(" mb[%u]=%x", i, mcp->mb[i]);
553 cnt--;
555 pr_warn(" cmd=%x ****\n", command);
557 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
558 ql_dbg(ql_dbg_mbx, vha, 0x1198,
559 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
560 RD_REG_DWORD(&reg->isp24.host_status),
561 RD_REG_DWORD(&reg->isp24.ictrl),
562 RD_REG_DWORD(&reg->isp24.istatus));
563 } else {
564 ql_dbg(ql_dbg_mbx, vha, 0x1206,
565 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
566 RD_REG_WORD(&reg->isp.ctrl_status),
567 RD_REG_WORD(&reg->isp.ictrl),
568 RD_REG_WORD(&reg->isp.istatus));
570 } else {
571 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
574 return rval;
578 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
579 uint32_t risc_code_size)
581 int rval;
582 struct qla_hw_data *ha = vha->hw;
583 mbx_cmd_t mc;
584 mbx_cmd_t *mcp = &mc;
586 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
587 "Entered %s.\n", __func__);
589 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
590 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
591 mcp->mb[8] = MSW(risc_addr);
592 mcp->out_mb = MBX_8|MBX_0;
593 } else {
594 mcp->mb[0] = MBC_LOAD_RISC_RAM;
595 mcp->out_mb = MBX_0;
597 mcp->mb[1] = LSW(risc_addr);
598 mcp->mb[2] = MSW(req_dma);
599 mcp->mb[3] = LSW(req_dma);
600 mcp->mb[6] = MSW(MSD(req_dma));
601 mcp->mb[7] = LSW(MSD(req_dma));
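/*
 * mb[3]/mb[2] carry the low 32 bits and mb[7]/mb[6] the high 32 bits of
 * the 64-bit DMA address of the source buffer (req_dma).
 */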
602 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
603 if (IS_FWI2_CAPABLE(ha)) {
604 mcp->mb[4] = MSW(risc_code_size);
605 mcp->mb[5] = LSW(risc_code_size);
606 mcp->out_mb |= MBX_5|MBX_4;
607 } else {
608 mcp->mb[4] = LSW(risc_code_size);
609 mcp->out_mb |= MBX_4;
612 mcp->in_mb = MBX_0;
613 mcp->tov = MBX_TOV_SECONDS;
614 mcp->flags = 0;
615 rval = qla2x00_mailbox_command(vha, mcp);
617 if (rval != QLA_SUCCESS) {
618 ql_dbg(ql_dbg_mbx, vha, 0x1023,
619 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
620 } else {
621 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
622 "Done %s.\n", __func__);
625 return rval;
628 #define EXTENDED_BB_CREDITS BIT_0
629 #define NVME_ENABLE_FLAG BIT_3
630 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
632 uint16_t mb4 = BIT_0;
634 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
635 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
637 return mb4;
640 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
642 uint16_t mb4 = BIT_0;
644 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
645 struct nvram_81xx *nv = ha->nvram;
647 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
650 return mb4;
654 * qla2x00_execute_fw
655 * Start adapter firmware.
657 * Input:
658 * ha = adapter block pointer.
659 * TARGET_QUEUE_LOCK must be released.
660 * ADAPTER_STATE_LOCK must be released.
662 * Returns:
663 * qla2x00 local function return status code.
665 * Context:
666 * Kernel context.
669 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
671 int rval;
672 struct qla_hw_data *ha = vha->hw;
673 mbx_cmd_t mc;
674 mbx_cmd_t *mcp = &mc;
676 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
677 "Entered %s.\n", __func__);
679 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
680 mcp->out_mb = MBX_0;
681 mcp->in_mb = MBX_0;
682 if (IS_FWI2_CAPABLE(ha)) {
683 mcp->mb[1] = MSW(risc_addr);
684 mcp->mb[2] = LSW(risc_addr);
685 mcp->mb[3] = 0;
686 mcp->mb[4] = 0;
687 mcp->mb[11] = 0;
688 ha->flags.using_lr_setting = 0;
689 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
690 IS_QLA27XX(ha)) {
691 if (ql2xautodetectsfp) {
692 if (ha->flags.detected_lr_sfp) {
693 mcp->mb[4] |=
694 qla25xx_set_sfp_lr_dist(ha);
695 ha->flags.using_lr_setting = 1;
697 } else {
698 struct nvram_81xx *nv = ha->nvram;
699 /* set LR distance if specified in nvram */
700 if (nv->enhanced_features &
701 NEF_LR_DIST_ENABLE) {
702 mcp->mb[4] |=
703 qla25xx_set_nvr_lr_dist(ha);
704 ha->flags.using_lr_setting = 1;
709 if (ql2xnvmeenable && IS_QLA27XX(ha))
710 mcp->mb[4] |= NVME_ENABLE_FLAG;
712 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
713 struct nvram_81xx *nv = ha->nvram;
714 /* set minimum speed if specified in nvram */
715 if (nv->min_link_speed >= 2 &&
716 nv->min_link_speed <= 5) {
717 mcp->mb[4] |= BIT_4;
718 mcp->mb[11] = nv->min_link_speed;
719 mcp->out_mb |= MBX_11;
720 mcp->in_mb |= BIT_5;
721 vha->min_link_speed_feat = nv->min_link_speed;
725 if (ha->flags.exlogins_enabled)
726 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
728 if (ha->flags.exchoffld_enabled)
729 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
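/*
 * mb[4] has been accumulating optional feature flags above: long-range SFP
 * distance, NVMe enable, minimum link speed, extended logins and exchange
 * offload.
 */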
731 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
732 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
733 } else {
734 mcp->mb[1] = LSW(risc_addr);
735 mcp->out_mb |= MBX_1;
736 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
737 mcp->mb[2] = 0;
738 mcp->out_mb |= MBX_2;
742 mcp->tov = MBX_TOV_SECONDS;
743 mcp->flags = 0;
744 rval = qla2x00_mailbox_command(vha, mcp);
746 if (rval != QLA_SUCCESS) {
747 ql_dbg(ql_dbg_mbx, vha, 0x1026,
748 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
749 } else {
750 if (IS_FWI2_CAPABLE(ha)) {
751 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
752 ql_dbg(ql_dbg_mbx, vha, 0x119a,
753 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
754 ql_dbg(ql_dbg_mbx, vha, 0x1027,
755 "exchanges=%x.\n", mcp->mb[1]);
756 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
757 ha->max_speed_sup = mcp->mb[2] & BIT_0;
758 ql_dbg(ql_dbg_mbx, vha, 0x119b,
759 "Maximum speed supported=%s.\n",
760 ha->max_speed_sup ? "32Gbps" : "16Gbps");
761 if (vha->min_link_speed_feat) {
762 ha->min_link_speed = mcp->mb[5];
763 ql_dbg(ql_dbg_mbx, vha, 0x119c,
764 "Minimum speed set=%s.\n",
765 mcp->mb[5] == 5 ? "32Gbps" :
766 mcp->mb[5] == 4 ? "16Gbps" :
767 mcp->mb[5] == 3 ? "8Gbps" :
768 mcp->mb[5] == 2 ? "4Gbps" :
769 "unknown");
773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
774 "Done.\n");
777 return rval;
781 * qla_get_exlogin_status
782 * Get extended login status
783 * uses the memory offload control/status Mailbox
785 * Input:
786 * ha: adapter state pointer.
787 * buf_sz/ex_logins_cnt: pointers for returned buffer size and extended login count.
789 * Returns:
790 * qla2x00 local function status
792 * Context:
793 * Kernel context.
795 #define FETCH_XLOGINS_STAT 0x8
797 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
798 uint16_t *ex_logins_cnt)
800 int rval;
801 mbx_cmd_t mc;
802 mbx_cmd_t *mcp = &mc;
804 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
805 "Entered %s\n", __func__);
807 memset(mcp->mb, 0, sizeof(mcp->mb));
808 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
809 mcp->mb[1] = FETCH_XLOGINS_STAT;
810 mcp->out_mb = MBX_1|MBX_0;
811 mcp->in_mb = MBX_10|MBX_4|MBX_0;
812 mcp->tov = MBX_TOV_SECONDS;
813 mcp->flags = 0;
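/*
 * Firmware returns the buffer size in mb[4] and the extended login count
 * in mb[10]; see the assignments below.
 */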
815 rval = qla2x00_mailbox_command(vha, mcp);
816 if (rval != QLA_SUCCESS) {
817 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
818 } else {
819 *buf_sz = mcp->mb[4];
820 *ex_logins_cnt = mcp->mb[10];
822 ql_log(ql_log_info, vha, 0x1190,
823 "buffer size 0x%x, exchange login count=%d\n",
824 mcp->mb[4], mcp->mb[10]);
826 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
827 "Done %s.\n", __func__);
830 return rval;
834 * qla_set_exlogin_mem_cfg
835 * Set extended login memory configuration.
836 * Mbx needs to be issued before init_cb is set
838 * Input:
839 * ha: adapter state pointer.
840 * buffer: buffer pointer
841 * phys_addr: physical address of buffer
842 * size: size of buffer
843 * TARGET_QUEUE_LOCK must be released
844 * ADAPTER_STATE_LOCK must be released
846 * Returns:
847 * qla2x00 local function status code.
849 * Context:
850 * Kernel context.
852 #define CONFIG_XLOGINS_MEM 0x3
854 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
856 int rval;
857 mbx_cmd_t mc;
858 mbx_cmd_t *mcp = &mc;
859 struct qla_hw_data *ha = vha->hw;
861 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
862 "Entered %s.\n", __func__);
864 memset(mcp->mb, 0, sizeof(mcp->mb));
865 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
866 mcp->mb[1] = CONFIG_XLOGINS_MEM;
867 mcp->mb[2] = MSW(phys_addr);
868 mcp->mb[3] = LSW(phys_addr);
869 mcp->mb[6] = MSW(MSD(phys_addr));
870 mcp->mb[7] = LSW(MSD(phys_addr));
871 mcp->mb[8] = MSW(ha->exlogin_size);
872 mcp->mb[9] = LSW(ha->exlogin_size);
873 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
874 mcp->in_mb = MBX_11|MBX_0;
875 mcp->tov = MBX_TOV_SECONDS;
876 mcp->flags = 0;
877 rval = qla2x00_mailbox_command(vha, mcp);
878 if (rval != QLA_SUCCESS) {
879 /*EMPTY*/
880 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
881 } else {
882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
883 "Done %s.\n", __func__);
886 return rval;
890 * qla_get_exchoffld_status
891 * Get exchange offload status
892 * uses the memory offload control/status Mailbox
894 * Input:
895 * ha: adapter state pointer.
896 * buf_sz/ex_logins_cnt: pointers for returned buffer size and exchange offload count.
898 * Returns:
899 * qla2x00 local function status
901 * Context:
902 * Kernel context.
904 #define FETCH_XCHOFFLD_STAT 0x2
906 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
907 uint16_t *ex_logins_cnt)
909 int rval;
910 mbx_cmd_t mc;
911 mbx_cmd_t *mcp = &mc;
913 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
914 "Entered %s\n", __func__);
916 memset(mcp->mb, 0, sizeof(mcp->mb));
917 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
918 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
919 mcp->out_mb = MBX_1|MBX_0;
920 mcp->in_mb = MBX_10|MBX_4|MBX_0;
921 mcp->tov = MBX_TOV_SECONDS;
922 mcp->flags = 0;
924 rval = qla2x00_mailbox_command(vha, mcp);
925 if (rval != QLA_SUCCESS) {
926 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
927 } else {
928 *buf_sz = mcp->mb[4];
929 *ex_logins_cnt = mcp->mb[10];
931 ql_log(ql_log_info, vha, 0x118e,
932 "buffer size 0x%x, exchange offload count=%d\n",
933 mcp->mb[4], mcp->mb[10]);
935 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
936 "Done %s.\n", __func__);
939 return rval;
943 * qla_set_exchoffld_mem_cfg
944 * Set exchange offload memory configuration
945 * Mbx needs to be issued before init_cb is set
947 * Input:
948 * ha: adapter state pointer.
949 * buffer: buffer pointer
950 * phys_addr: physical address of buffer
951 * size: size of buffer
952 * TARGET_QUEUE_LOCK must be released
953 * ADAPTER_STATE_LOCK must be released
955 * Returns:
956 * qla2x00 local function status code.
958 * Context:
959 * Kernel context.
961 #define CONFIG_XCHOFFLD_MEM 0x3
963 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
965 int rval;
966 mbx_cmd_t mc;
967 mbx_cmd_t *mcp = &mc;
968 struct qla_hw_data *ha = vha->hw;
970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
971 "Entered %s.\n", __func__);
973 memset(mcp->mb, 0, sizeof(mcp->mb));
974 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
975 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
976 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
977 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
978 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
979 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
980 mcp->mb[8] = MSW(ha->exchoffld_size);
981 mcp->mb[9] = LSW(ha->exchoffld_size);
982 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
983 mcp->in_mb = MBX_11|MBX_0;
984 mcp->tov = MBX_TOV_SECONDS;
985 mcp->flags = 0;
986 rval = qla2x00_mailbox_command(vha, mcp);
987 if (rval != QLA_SUCCESS) {
988 /*EMPTY*/
989 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
990 } else {
991 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
992 "Done %s.\n", __func__);
995 return rval;
999 * qla2x00_get_fw_version
1000 * Get firmware version.
1002 * Input:
1003 * ha: adapter state pointer.
1004 * major: pointer for major number.
1005 * minor: pointer for minor number.
1006 * subminor: pointer for subminor number.
1008 * Returns:
1009 * qla2x00 local function return status code.
1011 * Context:
1012 * Kernel context.
1015 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1017 int rval;
1018 mbx_cmd_t mc;
1019 mbx_cmd_t *mcp = &mc;
1020 struct qla_hw_data *ha = vha->hw;
1022 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1023 "Entered %s.\n", __func__);
1025 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1026 mcp->out_mb = MBX_0;
1027 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1028 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1029 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1030 if (IS_FWI2_CAPABLE(ha))
1031 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1032 if (IS_QLA27XX(ha))
1033 mcp->in_mb |=
1034 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1035 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
1037 mcp->flags = 0;
1038 mcp->tov = MBX_TOV_SECONDS;
1039 rval = qla2x00_mailbox_command(vha, mcp);
1040 if (rval != QLA_SUCCESS)
1041 goto failed;
1043 /* Return mailbox data. */
1044 ha->fw_major_version = mcp->mb[1];
1045 ha->fw_minor_version = mcp->mb[2];
1046 ha->fw_subminor_version = mcp->mb[3];
1047 ha->fw_attributes = mcp->mb[6];
1048 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1049 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1050 else
1051 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1053 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1054 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1055 ha->mpi_version[1] = mcp->mb[11] >> 8;
1056 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1057 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1058 ha->phy_version[0] = mcp->mb[8] & 0xff;
1059 ha->phy_version[1] = mcp->mb[9] >> 8;
1060 ha->phy_version[2] = mcp->mb[9] & 0xff;
1063 if (IS_FWI2_CAPABLE(ha)) {
1064 ha->fw_attributes_h = mcp->mb[15];
1065 ha->fw_attributes_ext[0] = mcp->mb[16];
1066 ha->fw_attributes_ext[1] = mcp->mb[17];
1067 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1068 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1069 __func__, mcp->mb[15], mcp->mb[6]);
1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1071 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1072 __func__, mcp->mb[17], mcp->mb[16]);
1074 if (ha->fw_attributes_h & 0x4)
1075 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1076 "%s: Firmware supports Extended Login 0x%x\n",
1077 __func__, ha->fw_attributes_h);
1079 if (ha->fw_attributes_h & 0x8)
1080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1081 "%s: Firmware supports Exchange Offload 0x%x\n",
1082 __func__, ha->fw_attributes_h);
1085 * FW supports nvme and driver load parameter requested nvme.
1086 * BIT 26 of fw_attributes indicates NVMe support.
1088 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
1089 vha->flags.nvme_enabled = 1;
1090 ql_log(ql_log_info, vha, 0xd302,
1091 "%s: FC-NVMe is Enabled (0x%x)\n",
1092 __func__, ha->fw_attributes_h);
1096 if (IS_QLA27XX(ha)) {
1097 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1098 ha->mpi_version[1] = mcp->mb[11] >> 8;
1099 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1100 ha->pep_version[0] = mcp->mb[13] & 0xff;
1101 ha->pep_version[1] = mcp->mb[14] >> 8;
1102 ha->pep_version[2] = mcp->mb[14] & 0xff;
1103 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1104 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1105 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1106 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1109 failed:
1110 if (rval != QLA_SUCCESS) {
1111 /*EMPTY*/
1112 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1113 } else {
1114 /*EMPTY*/
1115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1116 "Done %s.\n", __func__);
1118 return rval;
1122 * qla2x00_get_fw_options
1123 * Get firmware options.
1125 * Input:
1126 * ha = adapter block pointer.
1127 * fwopt = pointer for firmware options.
1129 * Returns:
1130 * qla2x00 local function return status code.
1132 * Context:
1133 * Kernel context.
1136 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1138 int rval;
1139 mbx_cmd_t mc;
1140 mbx_cmd_t *mcp = &mc;
1142 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1143 "Entered %s.\n", __func__);
1145 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1146 mcp->out_mb = MBX_0;
1147 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1148 mcp->tov = MBX_TOV_SECONDS;
1149 mcp->flags = 0;
1150 rval = qla2x00_mailbox_command(vha, mcp);
1152 if (rval != QLA_SUCCESS) {
1153 /*EMPTY*/
1154 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1155 } else {
1156 fwopts[0] = mcp->mb[0];
1157 fwopts[1] = mcp->mb[1];
1158 fwopts[2] = mcp->mb[2];
1159 fwopts[3] = mcp->mb[3];
1161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1162 "Done %s.\n", __func__);
1165 return rval;
1170 * qla2x00_set_fw_options
1171 * Set firmware options.
1173 * Input:
1174 * ha = adapter block pointer.
1175 * fwopt = pointer for firmware options.
1177 * Returns:
1178 * qla2x00 local function return status code.
1180 * Context:
1181 * Kernel context.
1184 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1186 int rval;
1187 mbx_cmd_t mc;
1188 mbx_cmd_t *mcp = &mc;
1190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1191 "Entered %s.\n", __func__);
1193 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1194 mcp->mb[1] = fwopts[1];
1195 mcp->mb[2] = fwopts[2];
1196 mcp->mb[3] = fwopts[3];
1197 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1198 mcp->in_mb = MBX_0;
1199 if (IS_FWI2_CAPABLE(vha->hw)) {
1200 mcp->in_mb |= MBX_1;
1201 mcp->mb[10] = fwopts[10];
1202 mcp->out_mb |= MBX_10;
1203 } else {
1204 mcp->mb[10] = fwopts[10];
1205 mcp->mb[11] = fwopts[11];
1206 mcp->mb[12] = 0; /* Undocumented, but used */
1207 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1209 mcp->tov = MBX_TOV_SECONDS;
1210 mcp->flags = 0;
1211 rval = qla2x00_mailbox_command(vha, mcp);
1213 fwopts[0] = mcp->mb[0];
1215 if (rval != QLA_SUCCESS) {
1216 /*EMPTY*/
1217 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1218 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1219 } else {
1220 /*EMPTY*/
1221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1222 "Done %s.\n", __func__);
1225 return rval;
1229 * qla2x00_mbx_reg_test
1230 * Mailbox register wrap test.
1232 * Input:
1233 * ha = adapter block pointer.
1234 * TARGET_QUEUE_LOCK must be released.
1235 * ADAPTER_STATE_LOCK must be released.
1237 * Returns:
1238 * qla2x00 local function return status code.
1240 * Context:
1241 * Kernel context.
1244 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1246 int rval;
1247 mbx_cmd_t mc;
1248 mbx_cmd_t *mcp = &mc;
1250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1251 "Entered %s.\n", __func__);
1253 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1254 mcp->mb[1] = 0xAAAA;
1255 mcp->mb[2] = 0x5555;
1256 mcp->mb[3] = 0xAA55;
1257 mcp->mb[4] = 0x55AA;
1258 mcp->mb[5] = 0xA5A5;
1259 mcp->mb[6] = 0x5A5A;
1260 mcp->mb[7] = 0x2525;
1261 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1262 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
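/*
 * The firmware is expected to echo the pattern written in mb[1..7] back
 * unchanged; any mismatch below fails the register wrap test.
 */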
1263 mcp->tov = MBX_TOV_SECONDS;
1264 mcp->flags = 0;
1265 rval = qla2x00_mailbox_command(vha, mcp);
1267 if (rval == QLA_SUCCESS) {
1268 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1269 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1270 rval = QLA_FUNCTION_FAILED;
1271 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1272 mcp->mb[7] != 0x2525)
1273 rval = QLA_FUNCTION_FAILED;
1276 if (rval != QLA_SUCCESS) {
1277 /*EMPTY*/
1278 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1279 } else {
1280 /*EMPTY*/
1281 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1282 "Done %s.\n", __func__);
1285 return rval;
1289 * qla2x00_verify_checksum
1290 * Verify firmware checksum.
1292 * Input:
1293 * ha = adapter block pointer.
1294 * TARGET_QUEUE_LOCK must be released.
1295 * ADAPTER_STATE_LOCK must be released.
1297 * Returns:
1298 * qla2x00 local function return status code.
1300 * Context:
1301 * Kernel context.
1304 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1306 int rval;
1307 mbx_cmd_t mc;
1308 mbx_cmd_t *mcp = &mc;
1310 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1311 "Entered %s.\n", __func__);
1313 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1314 mcp->out_mb = MBX_0;
1315 mcp->in_mb = MBX_0;
1316 if (IS_FWI2_CAPABLE(vha->hw)) {
1317 mcp->mb[1] = MSW(risc_addr);
1318 mcp->mb[2] = LSW(risc_addr);
1319 mcp->out_mb |= MBX_2|MBX_1;
1320 mcp->in_mb |= MBX_2|MBX_1;
1321 } else {
1322 mcp->mb[1] = LSW(risc_addr);
1323 mcp->out_mb |= MBX_1;
1324 mcp->in_mb |= MBX_1;
1327 mcp->tov = MBX_TOV_SECONDS;
1328 mcp->flags = 0;
1329 rval = qla2x00_mailbox_command(vha, mcp);
1331 if (rval != QLA_SUCCESS) {
1332 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1333 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1334 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1335 } else {
1336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1337 "Done %s.\n", __func__);
1340 return rval;
1344 * qla2x00_issue_iocb
1345 * Issue IOCB using mailbox command
1347 * Input:
1348 * ha = adapter state pointer.
1349 * buffer = buffer pointer.
1350 * phys_addr = physical address of buffer.
1351 * size = size of buffer.
1352 * TARGET_QUEUE_LOCK must be released.
1353 * ADAPTER_STATE_LOCK must be released.
1355 * Returns:
1356 * qla2x00 local function return status code.
1358 * Context:
1359 * Kernel context.
1362 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1363 dma_addr_t phys_addr, size_t size, uint32_t tov)
1365 int rval;
1366 mbx_cmd_t mc;
1367 mbx_cmd_t *mcp = &mc;
1369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1370 "Entered %s.\n", __func__);
1372 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1373 mcp->mb[1] = 0;
1374 mcp->mb[2] = MSW(phys_addr);
1375 mcp->mb[3] = LSW(phys_addr);
1376 mcp->mb[6] = MSW(MSD(phys_addr));
1377 mcp->mb[7] = LSW(MSD(phys_addr));
1378 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1379 mcp->in_mb = MBX_2|MBX_0;
1380 mcp->tov = tov;
1381 mcp->flags = 0;
1382 rval = qla2x00_mailbox_command(vha, mcp);
1384 if (rval != QLA_SUCCESS) {
1385 /*EMPTY*/
1386 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1387 } else {
1388 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1390 /* Mask reserved bits. */
1391 sts_entry->entry_status &=
1392 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1393 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1394 "Done %s.\n", __func__);
1397 return rval;
1401 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1402 size_t size)
1404 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1405 MBX_TOV_SECONDS);
1409 * qla2x00_abort_command
1410 * Abort command aborts a specified IOCB.
1412 * Input:
1413 * ha = adapter block pointer.
1414 * sp = SRB structure pointer.
1416 * Returns:
1417 * qla2x00 local function return status code.
1419 * Context:
1420 * Kernel context.
1423 qla2x00_abort_command(srb_t *sp)
1425 unsigned long flags = 0;
1426 int rval;
1427 uint32_t handle = 0;
1428 mbx_cmd_t mc;
1429 mbx_cmd_t *mcp = &mc;
1430 fc_port_t *fcport = sp->fcport;
1431 scsi_qla_host_t *vha = fcport->vha;
1432 struct qla_hw_data *ha = vha->hw;
1433 struct req_que *req;
1434 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1436 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1437 "Entered %s.\n", __func__);
1439 if (vha->flags.qpairs_available && sp->qpair)
1440 req = sp->qpair->req;
1441 else
1442 req = vha->req;
1444 spin_lock_irqsave(&ha->hardware_lock, flags);
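/*
 * Locate the driver handle for this srb among the outstanding commands;
 * the handle (mb[2]/mb[3]) identifies the IOCB to be aborted to the
 * firmware.
 */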
1445 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1446 if (req->outstanding_cmds[handle] == sp)
1447 break;
1449 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1451 if (handle == req->num_outstanding_cmds) {
1452 /* command not found */
1453 return QLA_FUNCTION_FAILED;
1456 mcp->mb[0] = MBC_ABORT_COMMAND;
1457 if (HAS_EXTENDED_IDS(ha))
1458 mcp->mb[1] = fcport->loop_id;
1459 else
1460 mcp->mb[1] = fcport->loop_id << 8;
1461 mcp->mb[2] = (uint16_t)handle;
1462 mcp->mb[3] = (uint16_t)(handle >> 16);
1463 mcp->mb[6] = (uint16_t)cmd->device->lun;
1464 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1465 mcp->in_mb = MBX_0;
1466 mcp->tov = MBX_TOV_SECONDS;
1467 mcp->flags = 0;
1468 rval = qla2x00_mailbox_command(vha, mcp);
1470 if (rval != QLA_SUCCESS) {
1471 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1472 } else {
1473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1474 "Done %s.\n", __func__);
1477 return rval;
1481 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1483 int rval, rval2;
1484 mbx_cmd_t mc;
1485 mbx_cmd_t *mcp = &mc;
1486 scsi_qla_host_t *vha;
1487 struct req_que *req;
1488 struct rsp_que *rsp;
1490 l = l;
1491 vha = fcport->vha;
1493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1494 "Entered %s.\n", __func__);
1496 req = vha->hw->req_q_map[0];
1497 rsp = req->rsp;
1498 mcp->mb[0] = MBC_ABORT_TARGET;
1499 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1500 if (HAS_EXTENDED_IDS(vha->hw)) {
1501 mcp->mb[1] = fcport->loop_id;
1502 mcp->mb[10] = 0;
1503 mcp->out_mb |= MBX_10;
1504 } else {
1505 mcp->mb[1] = fcport->loop_id << 8;
1507 mcp->mb[2] = vha->hw->loop_reset_delay;
1508 mcp->mb[9] = vha->vp_idx;
1510 mcp->in_mb = MBX_0;
1511 mcp->tov = MBX_TOV_SECONDS;
1512 mcp->flags = 0;
1513 rval = qla2x00_mailbox_command(vha, mcp);
1514 if (rval != QLA_SUCCESS) {
1515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1516 "Failed=%x.\n", rval);
1519 /* Issue marker IOCB. */
1520 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1521 MK_SYNC_ID);
1522 if (rval2 != QLA_SUCCESS) {
1523 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1524 "Failed to issue marker IOCB (%x).\n", rval2);
1525 } else {
1526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1527 "Done %s.\n", __func__);
1530 return rval;
1534 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1536 int rval, rval2;
1537 mbx_cmd_t mc;
1538 mbx_cmd_t *mcp = &mc;
1539 scsi_qla_host_t *vha;
1540 struct req_que *req;
1541 struct rsp_que *rsp;
1543 vha = fcport->vha;
1545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1546 "Entered %s.\n", __func__);
1548 req = vha->hw->req_q_map[0];
1549 rsp = req->rsp;
1550 mcp->mb[0] = MBC_LUN_RESET;
1551 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1552 if (HAS_EXTENDED_IDS(vha->hw))
1553 mcp->mb[1] = fcport->loop_id;
1554 else
1555 mcp->mb[1] = fcport->loop_id << 8;
1556 mcp->mb[2] = (u32)l;
1557 mcp->mb[3] = 0;
1558 mcp->mb[9] = vha->vp_idx;
1560 mcp->in_mb = MBX_0;
1561 mcp->tov = MBX_TOV_SECONDS;
1562 mcp->flags = 0;
1563 rval = qla2x00_mailbox_command(vha, mcp);
1564 if (rval != QLA_SUCCESS) {
1565 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1568 /* Issue marker IOCB. */
1569 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1570 MK_SYNC_ID_LUN);
1571 if (rval2 != QLA_SUCCESS) {
1572 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1573 "Failed to issue marker IOCB (%x).\n", rval2);
1574 } else {
1575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1576 "Done %s.\n", __func__);
1579 return rval;
1583 * qla2x00_get_adapter_id
1584 * Get adapter ID and topology.
1586 * Input:
1587 * ha = adapter block pointer.
1588 * id = pointer for loop ID.
1589 * al_pa = pointer for AL_PA.
1590 * area = pointer for area.
1591 * domain = pointer for domain.
1592 * top = pointer for topology.
1593 * TARGET_QUEUE_LOCK must be released.
1594 * ADAPTER_STATE_LOCK must be released.
1596 * Returns:
1597 * qla2x00 local function return status code.
1599 * Context:
1600 * Kernel context.
1603 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1604 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1606 int rval;
1607 mbx_cmd_t mc;
1608 mbx_cmd_t *mcp = &mc;
1610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1611 "Entered %s.\n", __func__);
1613 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1614 mcp->mb[9] = vha->vp_idx;
1615 mcp->out_mb = MBX_9|MBX_0;
1616 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1617 if (IS_CNA_CAPABLE(vha->hw))
1618 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1619 if (IS_FWI2_CAPABLE(vha->hw))
1620 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1621 if (IS_QLA27XX(vha->hw))
1622 mcp->in_mb |= MBX_15;
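/*
 * Extra returned registers depend on the adapter type: CNA parts report
 * FCoE VLAN/FCF/VN-port MAC data (mb[9..13]), FWI-2 parts may return an
 * FA-WWN (mb[16..19]) and 27xx parts report BBCR in mb[15].
 */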
1623 mcp->tov = MBX_TOV_SECONDS;
1624 mcp->flags = 0;
1625 rval = qla2x00_mailbox_command(vha, mcp);
1626 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1627 rval = QLA_COMMAND_ERROR;
1628 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1629 rval = QLA_INVALID_COMMAND;
1631 /* Return data. */
1632 *id = mcp->mb[1];
1633 *al_pa = LSB(mcp->mb[2]);
1634 *area = MSB(mcp->mb[2]);
1635 *domain = LSB(mcp->mb[3]);
1636 *top = mcp->mb[6];
1637 *sw_cap = mcp->mb[7];
1639 if (rval != QLA_SUCCESS) {
1640 /*EMPTY*/
1641 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1642 } else {
1643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1644 "Done %s.\n", __func__);
1646 if (IS_CNA_CAPABLE(vha->hw)) {
1647 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1648 vha->fcoe_fcf_idx = mcp->mb[10];
1649 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1650 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1651 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1652 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1653 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1654 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1656 /* If FA-WWN supported */
1657 if (IS_FAWWN_CAPABLE(vha->hw)) {
1658 if (mcp->mb[7] & BIT_14) {
1659 vha->port_name[0] = MSB(mcp->mb[16]);
1660 vha->port_name[1] = LSB(mcp->mb[16]);
1661 vha->port_name[2] = MSB(mcp->mb[17]);
1662 vha->port_name[3] = LSB(mcp->mb[17]);
1663 vha->port_name[4] = MSB(mcp->mb[18]);
1664 vha->port_name[5] = LSB(mcp->mb[18]);
1665 vha->port_name[6] = MSB(mcp->mb[19]);
1666 vha->port_name[7] = LSB(mcp->mb[19]);
1667 fc_host_port_name(vha->host) =
1668 wwn_to_u64(vha->port_name);
1669 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1670 "FA-WWN acquired %016llx\n",
1671 wwn_to_u64(vha->port_name));
1675 if (IS_QLA27XX(vha->hw))
1676 vha->bbcr = mcp->mb[15];
1679 return rval;
1683 * qla2x00_get_retry_cnt
1684 * Get current firmware login retry count and delay.
1686 * Input:
1687 * ha = adapter block pointer.
1688 * retry_cnt = pointer to login retry count.
1689 * tov = pointer to login timeout value.
1691 * Returns:
1692 * qla2x00 local function return status code.
1694 * Context:
1695 * Kernel context.
1698 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1699 uint16_t *r_a_tov)
1701 int rval;
1702 uint16_t ratov;
1703 mbx_cmd_t mc;
1704 mbx_cmd_t *mcp = &mc;
1706 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1707 "Entered %s.\n", __func__);
1709 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1710 mcp->out_mb = MBX_0;
1711 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1712 mcp->tov = MBX_TOV_SECONDS;
1713 mcp->flags = 0;
1714 rval = qla2x00_mailbox_command(vha, mcp);
1716 if (rval != QLA_SUCCESS) {
1717 /*EMPTY*/
1718 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1719 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1720 } else {
1721 /* Convert returned data and check our values. */
1722 *r_a_tov = mcp->mb[3] / 2;
1723 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1724 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1725 /* Update to the larger values */
1726 *retry_cnt = (uint8_t)mcp->mb[1];
1727 *tov = ratov;
1730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1731 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1734 return rval;
1738 * qla2x00_init_firmware
1739 * Initialize adapter firmware.
1741 * Input:
1742 * ha = adapter block pointer.
1743 * dptr = Initialization control block pointer.
1744 * size = size of initialization control block.
1745 * TARGET_QUEUE_LOCK must be released.
1746 * ADAPTER_STATE_LOCK must be released.
1748 * Returns:
1749 * qla2x00 local function return status code.
1751 * Context:
1752 * Kernel context.
1755 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1757 int rval;
1758 mbx_cmd_t mc;
1759 mbx_cmd_t *mcp = &mc;
1760 struct qla_hw_data *ha = vha->hw;
1762 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1763 "Entered %s.\n", __func__);
1765 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1766 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1767 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1769 if (ha->flags.npiv_supported)
1770 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1771 else
1772 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1774 mcp->mb[1] = 0;
1775 mcp->mb[2] = MSW(ha->init_cb_dma);
1776 mcp->mb[3] = LSW(ha->init_cb_dma);
1777 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1778 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1779 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
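/*
 * When an extended init control block is in use, flag it in mb[1] and pass
 * its DMA address in mb[10..13] and its size in mb[14].
 */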
1780 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1781 mcp->mb[1] = BIT_0;
1782 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1783 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1784 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1785 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1786 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1787 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1789 /* 1 and 2 should normally be captured. */
1790 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1791 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1792 /* mb3 is additional info about the installed SFP. */
1793 mcp->in_mb |= MBX_3;
1794 mcp->buf_size = size;
1795 mcp->flags = MBX_DMA_OUT;
1796 mcp->tov = MBX_TOV_SECONDS;
1797 rval = qla2x00_mailbox_command(vha, mcp);
1799 if (rval != QLA_SUCCESS) {
1800 /*EMPTY*/
1801 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1802 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1803 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1804 } else {
1805 if (IS_QLA27XX(ha)) {
1806 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1807 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1808 "Invalid SFP/Validation Failed\n");
1810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1811 "Done %s.\n", __func__);
1814 return rval;
1819 * qla2x00_get_port_database
1820 * Issue normal/enhanced get port database mailbox command
1821 * and copy device name as necessary.
1823 * Input:
1824 * ha = adapter state pointer.
1825 * dev = structure pointer.
1826 * opt = enhanced cmd option byte.
1828 * Returns:
1829 * qla2x00 local function return status code.
1831 * Context:
1832 * Kernel context.
1835 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1837 int rval;
1838 mbx_cmd_t mc;
1839 mbx_cmd_t *mcp = &mc;
1840 port_database_t *pd;
1841 struct port_database_24xx *pd24;
1842 dma_addr_t pd_dma;
1843 struct qla_hw_data *ha = vha->hw;
1845 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1846 "Entered %s.\n", __func__);
1848 pd24 = NULL;
1849 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1850 if (pd == NULL) {
1851 ql_log(ql_log_warn, vha, 0x1050,
1852 "Failed to allocate port database structure.\n");
1853 fcport->query = 0;
1854 return QLA_MEMORY_ALLOC_FAILED;
1857 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1858 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1859 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1860 mcp->mb[2] = MSW(pd_dma);
1861 mcp->mb[3] = LSW(pd_dma);
1862 mcp->mb[6] = MSW(MSD(pd_dma));
1863 mcp->mb[7] = LSW(MSD(pd_dma));
1864 mcp->mb[9] = vha->vp_idx;
1865 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1866 mcp->in_mb = MBX_0;
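/*
 * Loop ID placement depends on the ISP: FWI-2 and extended-ID adapters pass
 * it in mb[1] with the option byte in mb[10]; older adapters pack both into
 * mb[1] as (loop_id << 8 | opt).
 */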
1867 if (IS_FWI2_CAPABLE(ha)) {
1868 mcp->mb[1] = fcport->loop_id;
1869 mcp->mb[10] = opt;
1870 mcp->out_mb |= MBX_10|MBX_1;
1871 mcp->in_mb |= MBX_1;
1872 } else if (HAS_EXTENDED_IDS(ha)) {
1873 mcp->mb[1] = fcport->loop_id;
1874 mcp->mb[10] = opt;
1875 mcp->out_mb |= MBX_10|MBX_1;
1876 } else {
1877 mcp->mb[1] = fcport->loop_id << 8 | opt;
1878 mcp->out_mb |= MBX_1;
1880 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1881 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1882 mcp->flags = MBX_DMA_IN;
1883 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1884 rval = qla2x00_mailbox_command(vha, mcp);
1885 if (rval != QLA_SUCCESS)
1886 goto gpd_error_out;
1888 if (IS_FWI2_CAPABLE(ha)) {
1889 uint64_t zero = 0;
1890 u8 current_login_state, last_login_state;
1892 pd24 = (struct port_database_24xx *) pd;
1894 /* Check for logged in state. */
1895 if (fcport->fc4f_nvme) {
1896 current_login_state = pd24->current_login_state >> 4;
1897 last_login_state = pd24->last_login_state >> 4;
1898 } else {
1899 current_login_state = pd24->current_login_state & 0xf;
1900 last_login_state = pd24->last_login_state & 0xf;
1902 fcport->current_login_state = pd24->current_login_state;
1903 fcport->last_login_state = pd24->last_login_state;
1905 /* Check for logged in state. */
1906 if (current_login_state != PDS_PRLI_COMPLETE &&
1907 last_login_state != PDS_PRLI_COMPLETE) {
1908 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1909 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1910 current_login_state, last_login_state,
1911 fcport->loop_id);
1912 rval = QLA_FUNCTION_FAILED;
1914 if (!fcport->query)
1915 goto gpd_error_out;
1918 if (fcport->loop_id == FC_NO_LOOP_ID ||
1919 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1920 memcmp(fcport->port_name, pd24->port_name, 8))) {
1921 /* We lost the device mid way. */
1922 rval = QLA_NOT_LOGGED_IN;
1923 goto gpd_error_out;
1926 /* Names are little-endian. */
1927 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1928 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1930 /* Get port_id of device. */
1931 fcport->d_id.b.domain = pd24->port_id[0];
1932 fcport->d_id.b.area = pd24->port_id[1];
1933 fcport->d_id.b.al_pa = pd24->port_id[2];
1934 fcport->d_id.b.rsvd_1 = 0;
1936 /* If not a target, it must be an initiator or of unknown type. */
1937 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1938 fcport->port_type = FCT_INITIATOR;
1939 else
1940 fcport->port_type = FCT_TARGET;
1942 /* Passback COS information. */
1943 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1944 FC_COS_CLASS2 : FC_COS_CLASS3;
1946 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1947 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1948 } else {
1949 uint64_t zero = 0;
1951 /* Check for logged in state. */
1952 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1953 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1954 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1955 "Unable to verify login-state (%x/%x) - "
1956 "portid=%02x%02x%02x.\n", pd->master_state,
1957 pd->slave_state, fcport->d_id.b.domain,
1958 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1959 rval = QLA_FUNCTION_FAILED;
1960 goto gpd_error_out;
1963 if (fcport->loop_id == FC_NO_LOOP_ID ||
1964 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1965 memcmp(fcport->port_name, pd->port_name, 8))) {
1966 /* We lost the device mid way. */
1967 rval = QLA_NOT_LOGGED_IN;
1968 goto gpd_error_out;
1971 /* Names are little-endian. */
1972 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1973 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1975 /* Get port_id of device. */
1976 fcport->d_id.b.domain = pd->port_id[0];
1977 fcport->d_id.b.area = pd->port_id[3];
1978 fcport->d_id.b.al_pa = pd->port_id[2];
1979 fcport->d_id.b.rsvd_1 = 0;
1981 /* If not a target, it must be an initiator or of unknown type. */
1982 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1983 fcport->port_type = FCT_INITIATOR;
1984 else
1985 fcport->port_type = FCT_TARGET;
1987 /* Passback COS information. */
1988 fcport->supported_classes = (pd->options & BIT_4) ?
1989 FC_COS_CLASS2: FC_COS_CLASS3;
1992 gpd_error_out:
1993 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1994 fcport->query = 0;
1996 if (rval != QLA_SUCCESS) {
1997 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1998 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1999 mcp->mb[0], mcp->mb[1]);
2000 } else {
2001 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2002 "Done %s.\n", __func__);
2005 return rval;
2009 * qla2x00_get_firmware_state
2010 * Get adapter firmware state.
2012 * Input:
2013 * ha = adapter block pointer.
2014 * dptr = pointer for firmware state.
2015 * TARGET_QUEUE_LOCK must be released.
2016 * ADAPTER_STATE_LOCK must be released.
2018 * Returns:
2019 * qla2x00 local function return status code.
2021 * Context:
2022 * Kernel context.
2025 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2027 int rval;
2028 mbx_cmd_t mc;
2029 mbx_cmd_t *mcp = &mc;
2030 struct qla_hw_data *ha = vha->hw;
2032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2033 "Entered %s.\n", __func__);
2035 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2036 mcp->out_mb = MBX_0;
2037 if (IS_FWI2_CAPABLE(vha->hw))
2038 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2039 else
2040 mcp->in_mb = MBX_1|MBX_0;
2041 mcp->tov = MBX_TOV_SECONDS;
2042 mcp->flags = 0;
2043 rval = qla2x00_mailbox_command(vha, mcp);
2045 /* Return firmware states. */
2046 states[0] = mcp->mb[1];
2047 if (IS_FWI2_CAPABLE(vha->hw)) {
2048 states[1] = mcp->mb[2];
2049 states[2] = mcp->mb[3]; /* SFP info */
2050 states[3] = mcp->mb[4];
2051 states[4] = mcp->mb[5];
2052 states[5] = mcp->mb[6]; /* DPORT status */
2055 if (rval != QLA_SUCCESS) {
2056 /*EMPTY*/
2057 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2058 } else {
2059 if (IS_QLA27XX(ha)) {
2060 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2061 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2062 "Invalid SFP/Validation Failed\n");
2064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2065 "Done %s.\n", __func__);
2068 return rval;
2072 * qla2x00_get_port_name
2073 * Issue get port name mailbox command.
2074 * Returned name is in big endian format.
2076 * Input:
2077 * ha = adapter block pointer.
2078 * loop_id = loop ID of device.
2079 * name = pointer for name.
2080 * TARGET_QUEUE_LOCK must be released.
2081 * ADAPTER_STATE_LOCK must be released.
2083 * Returns:
2084 * qla2x00 local function return status code.
2086 * Context:
2087 * Kernel context.
 */
int
2090 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2091 uint8_t opt)
{
2093 int rval;
2094 mbx_cmd_t mc;
2095 mbx_cmd_t *mcp = &mc;
2097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2098 "Entered %s.\n", __func__);
2100 mcp->mb[0] = MBC_GET_PORT_NAME;
2101 mcp->mb[9] = vha->vp_idx;
2102 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2103 if (HAS_EXTENDED_IDS(vha->hw)) {
2104 mcp->mb[1] = loop_id;
2105 mcp->mb[10] = opt;
2106 mcp->out_mb |= MBX_10;
2107 } else {
2108 mcp->mb[1] = loop_id << 8 | opt;
2111 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2112 mcp->tov = MBX_TOV_SECONDS;
2113 mcp->flags = 0;
2114 rval = qla2x00_mailbox_command(vha, mcp);
2116 if (rval != QLA_SUCCESS) {
2117 /*EMPTY*/
2118 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2119 } else {
2120 if (name != NULL) {
2121 /* This function returns name in big endian. */
2122 name[0] = MSB(mcp->mb[2]);
2123 name[1] = LSB(mcp->mb[2]);
2124 name[2] = MSB(mcp->mb[3]);
2125 name[3] = LSB(mcp->mb[3]);
2126 name[4] = MSB(mcp->mb[6]);
2127 name[5] = LSB(mcp->mb[6]);
2128 name[6] = MSB(mcp->mb[7]);
2129 name[7] = LSB(mcp->mb[7]);
2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2133 "Done %s.\n", __func__);
2136 return rval;
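/*
 * Usage sketch (illustrative; the local "wwpn" buffer is hypothetical and
 * opt is simply forwarded to the firmware, 0 is used here only for the
 * example). The name comes back big-endian, so it can be copied straight
 * into the driver's WWN_SIZE-byte name fields:
 *
 *	uint8_t wwpn[WWN_SIZE];
 *
 *	if (qla2x00_get_port_name(vha, fcport->loop_id, wwpn, 0) ==
 *	    QLA_SUCCESS)
 *		memcpy(fcport->port_name, wwpn, WWN_SIZE);
 */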
/*
2140 * qla24xx_link_initialize
2141 * Issue link initialization mailbox command.
2143 * Input:
2144 * ha = adapter block pointer.
2145 * TARGET_QUEUE_LOCK must be released.
2146 * ADAPTER_STATE_LOCK must be released.
2148 * Returns:
2149 * qla2x00 local function return status code.
2151 * Context:
2152 * Kernel context.
 */
int
2155 qla24xx_link_initialize(scsi_qla_host_t *vha)
{
2157 int rval;
2158 mbx_cmd_t mc;
2159 mbx_cmd_t *mcp = &mc;
2161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2162 "Entered %s.\n", __func__);
2164 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2165 return QLA_FUNCTION_FAILED;
2167 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2168 mcp->mb[1] = BIT_4;
2169 if (vha->hw->operating_mode == LOOP)
2170 mcp->mb[1] |= BIT_6;
2171 else
2172 mcp->mb[1] |= BIT_5;
2173 mcp->mb[2] = 0;
2174 mcp->mb[3] = 0;
2175 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2176 mcp->in_mb = MBX_0;
2177 mcp->tov = MBX_TOV_SECONDS;
2178 mcp->flags = 0;
2179 rval = qla2x00_mailbox_command(vha, mcp);
2181 if (rval != QLA_SUCCESS) {
2182 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2183 } else {
2184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2185 "Done %s.\n", __func__);
2188 return rval;
/*
2192 * qla2x00_lip_reset
2193 * Issue LIP reset mailbox command.
2195 * Input:
2196 * ha = adapter block pointer.
2197 * TARGET_QUEUE_LOCK must be released.
2198 * ADAPTER_STATE_LOCK must be released.
2200 * Returns:
2201 * qla2x00 local function return status code.
2203 * Context:
2204 * Kernel context.
 */
int
2207 qla2x00_lip_reset(scsi_qla_host_t *vha)
{
2209 int rval;
2210 mbx_cmd_t mc;
2211 mbx_cmd_t *mcp = &mc;
2213 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2214 "Entered %s.\n", __func__);
2216 if (IS_CNA_CAPABLE(vha->hw)) {
2217 /* Logout across all FCFs. */
2218 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2219 mcp->mb[1] = BIT_1;
2220 mcp->mb[2] = 0;
2221 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2222 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2223 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2224 if (N2N_TOPO(vha->hw))
2225 mcp->mb[1] = BIT_4; /* re-init */
2226 else
2227 mcp->mb[1] = BIT_6; /* LIP */
2228 mcp->mb[2] = 0;
2229 mcp->mb[3] = vha->hw->loop_reset_delay;
2230 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2231 } else {
2232 mcp->mb[0] = MBC_LIP_RESET;
2233 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2234 if (HAS_EXTENDED_IDS(vha->hw)) {
2235 mcp->mb[1] = 0x00ff;
2236 mcp->mb[10] = 0;
2237 mcp->out_mb |= MBX_10;
2238 } else {
2239 mcp->mb[1] = 0xff00;
2241 mcp->mb[2] = vha->hw->loop_reset_delay;
2242 mcp->mb[3] = 0;
2244 mcp->in_mb = MBX_0;
2245 mcp->tov = MBX_TOV_SECONDS;
2246 mcp->flags = 0;
2247 rval = qla2x00_mailbox_command(vha, mcp);
2249 if (rval != QLA_SUCCESS) {
2250 /*EMPTY*/
2251 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2252 } else {
2253 /*EMPTY*/
2254 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2255 "Done %s.\n", __func__);
2258 return rval;
/*
2262 * qla2x00_send_sns
2263 * Send SNS command.
2265 * Input:
2266 * ha = adapter block pointer.
2267 * sns = pointer for command.
2268 * cmd_size = command size.
2269 * buf_size = response/command size.
2270 * TARGET_QUEUE_LOCK must be released.
2271 * ADAPTER_STATE_LOCK must be released.
2273 * Returns:
2274 * qla2x00 local function return status code.
2276 * Context:
2277 * Kernel context.
 */
int
2280 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2281 uint16_t cmd_size, size_t buf_size)
{
2283 int rval;
2284 mbx_cmd_t mc;
2285 mbx_cmd_t *mcp = &mc;
2287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2288 "Entered %s.\n", __func__);
2290 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2291 "Retry cnt=%d ratov=%d total tov=%d.\n",
2292 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2294 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2295 mcp->mb[1] = cmd_size;
2296 mcp->mb[2] = MSW(sns_phys_address);
2297 mcp->mb[3] = LSW(sns_phys_address);
2298 mcp->mb[6] = MSW(MSD(sns_phys_address));
2299 mcp->mb[7] = LSW(MSD(sns_phys_address));
2300 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2301 mcp->in_mb = MBX_0|MBX_1;
2302 mcp->buf_size = buf_size;
2303 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
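/*
 * The timeout set below is 2.5x the login timeout:
 * (login_timeout * 2) + (login_timeout / 2), e.g. a 20 second login
 * timeout gives this SNS mailbox command a 50 second window.
 */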
2304 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2305 rval = qla2x00_mailbox_command(vha, mcp);
2307 if (rval != QLA_SUCCESS) {
2308 /*EMPTY*/
2309 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2310 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2311 rval, mcp->mb[0], mcp->mb[1]);
2312 } else {
2313 /*EMPTY*/
2314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2315 "Done %s.\n", __func__);
2318 return rval;
int
2322 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2323 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
2325 int rval;
2327 struct logio_entry_24xx *lg;
2328 dma_addr_t lg_dma;
2329 uint32_t iop[2];
2330 struct qla_hw_data *ha = vha->hw;
2331 struct req_que *req;
2333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2334 "Entered %s.\n", __func__);
2336 if (vha->vp_idx && vha->qpair)
2337 req = vha->qpair->req;
2338 else
2339 req = ha->req_q_map[0];
2341 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2342 if (lg == NULL) {
2343 ql_log(ql_log_warn, vha, 0x1062,
2344 "Failed to allocate login IOCB.\n");
2345 return QLA_MEMORY_ALLOC_FAILED;
2348 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2349 lg->entry_count = 1;
2350 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2351 lg->nport_handle = cpu_to_le16(loop_id);
2352 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2353 if (opt & BIT_0)
2354 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2355 if (opt & BIT_1)
2356 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2357 lg->port_id[0] = al_pa;
2358 lg->port_id[1] = area;
2359 lg->port_id[2] = domain;
2360 lg->vp_index = vha->vp_idx;
2361 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2362 (ha->r_a_tov / 10 * 2) + 2);
2363 if (rval != QLA_SUCCESS) {
2364 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2365 "Failed to issue login IOCB (%x).\n", rval);
2366 } else if (lg->entry_status != 0) {
2367 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2368 "Failed to complete IOCB -- error status (%x).\n",
2369 lg->entry_status);
2370 rval = QLA_FUNCTION_FAILED;
2371 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2372 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2373 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2375 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2376 "Failed to complete IOCB -- completion status (%x) "
2377 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2378 iop[0], iop[1]);
2380 switch (iop[0]) {
2381 case LSC_SCODE_PORTID_USED:
2382 mb[0] = MBS_PORT_ID_USED;
2383 mb[1] = LSW(iop[1]);
2384 break;
2385 case LSC_SCODE_NPORT_USED:
2386 mb[0] = MBS_LOOP_ID_USED;
2387 break;
2388 case LSC_SCODE_NOLINK:
2389 case LSC_SCODE_NOIOCB:
2390 case LSC_SCODE_NOXCB:
2391 case LSC_SCODE_CMD_FAILED:
2392 case LSC_SCODE_NOFABRIC:
2393 case LSC_SCODE_FW_NOT_READY:
2394 case LSC_SCODE_NOT_LOGGED_IN:
2395 case LSC_SCODE_NOPCB:
2396 case LSC_SCODE_ELS_REJECT:
2397 case LSC_SCODE_CMD_PARAM_ERR:
2398 case LSC_SCODE_NONPORT:
2399 case LSC_SCODE_LOGGED_IN:
2400 case LSC_SCODE_NOFLOGI_ACC:
2401 default:
2402 mb[0] = MBS_COMMAND_ERROR;
2403 break;
2405 } else {
2406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2407 "Done %s.\n", __func__);
2409 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2411 mb[0] = MBS_COMMAND_COMPLETE;
2412 mb[1] = 0;
2413 if (iop[0] & BIT_4) {
2414 if (iop[0] & BIT_8)
2415 mb[1] |= BIT_1;
2416 } else
2417 mb[1] = BIT_0;
2419 /* Passback COS information. */
2420 mb[10] = 0;
2421 if (lg->io_parameter[7] || lg->io_parameter[8])
2422 mb[10] |= BIT_0; /* Class 2. */
2423 if (lg->io_parameter[9] || lg->io_parameter[10])
2424 mb[10] |= BIT_1; /* Class 3. */
2425 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2426 mb[10] |= BIT_7; /* Confirmed Completion
2427 * Allowed
 */
2431 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2433 return rval;
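/*
 * Caller-side sketch (illustrative; loosely modelled on the driver's
 * fabric-login path, with "next_loopid" as a hypothetical local). The
 * interesting part of the result is returned through mb[], not rval:
 *
 *	uint16_t mb[MAILBOX_REGISTER_COUNT];
 *	uint16_t next_loopid = 0;
 *
 *	ha->isp_ops->fabric_login(vha, fcport->loop_id, fcport->d_id.b.domain,
 *	    fcport->d_id.b.area, fcport->d_id.b.al_pa, mb, BIT_0);
 *	if (mb[0] == MBS_PORT_ID_USED)
 *		next_loopid = mb[1];
 */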
/*
2437 * qla2x00_login_fabric
2438 * Issue login fabric port mailbox command.
2440 * Input:
2441 * ha = adapter block pointer.
2442 * loop_id = device loop ID.
2443 * domain = device domain.
2444 * area = device area.
2445 * al_pa = device AL_PA.
2446 * status = pointer for return status.
2447 * opt = command options.
2448 * TARGET_QUEUE_LOCK must be released.
2449 * ADAPTER_STATE_LOCK must be released.
2451 * Returns:
2452 * qla2x00 local function return status code.
2454 * Context:
2455 * Kernel context.
 */
int
2458 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2459 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
2461 int rval;
2462 mbx_cmd_t mc;
2463 mbx_cmd_t *mcp = &mc;
2464 struct qla_hw_data *ha = vha->hw;
2466 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2467 "Entered %s.\n", __func__);
2469 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2470 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2471 if (HAS_EXTENDED_IDS(ha)) {
2472 mcp->mb[1] = loop_id;
2473 mcp->mb[10] = opt;
2474 mcp->out_mb |= MBX_10;
2475 } else {
2476 mcp->mb[1] = (loop_id << 8) | opt;
2478 mcp->mb[2] = domain;
2479 mcp->mb[3] = area << 8 | al_pa;
2481 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2482 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2483 mcp->flags = 0;
2484 rval = qla2x00_mailbox_command(vha, mcp);
2486 /* Return mailbox statuses. */
2487 if (mb != NULL) {
2488 mb[0] = mcp->mb[0];
2489 mb[1] = mcp->mb[1];
2490 mb[2] = mcp->mb[2];
2491 mb[6] = mcp->mb[6];
2492 mb[7] = mcp->mb[7];
2493 /* COS retrieved from Get-Port-Database mailbox command. */
2494 mb[10] = 0;
2497 if (rval != QLA_SUCCESS) {
2498 /* RLU tmp code: the main mailbox_command function should eventually
2499 * be changed to return ok even when the mailbox completion value is
2500 * not SUCCESS. Until then the caller is responsible for interpreting
2501 * the return values of this mailbox command, which avoids changing
2502 * too much of the existing code.
 */
2504 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2505 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2506 mcp->mb[0] == 0x4006)
2507 rval = QLA_SUCCESS;
2509 /*EMPTY*/
2510 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2511 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2512 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2513 } else {
2514 /*EMPTY*/
2515 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2516 "Done %s.\n", __func__);
2519 return rval;
/*
2523 * qla2x00_login_local_device
2524 * Issue login loop port mailbox command.
2526 * Input:
2527 * ha = adapter block pointer.
2528 * loop_id = device loop ID.
2529 * opt = command options.
2531 * Returns:
2532 * Return status code.
2534 * Context:
2535 * Kernel context.
 */
int
2539 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2540 uint16_t *mb_ret, uint8_t opt)
{
2542 int rval;
2543 mbx_cmd_t mc;
2544 mbx_cmd_t *mcp = &mc;
2545 struct qla_hw_data *ha = vha->hw;
2547 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2548 "Entered %s.\n", __func__);
2550 if (IS_FWI2_CAPABLE(ha))
2551 return qla24xx_login_fabric(vha, fcport->loop_id,
2552 fcport->d_id.b.domain, fcport->d_id.b.area,
2553 fcport->d_id.b.al_pa, mb_ret, opt);
2555 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2556 if (HAS_EXTENDED_IDS(ha))
2557 mcp->mb[1] = fcport->loop_id;
2558 else
2559 mcp->mb[1] = fcport->loop_id << 8;
2560 mcp->mb[2] = opt;
2561 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2562 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2563 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2564 mcp->flags = 0;
2565 rval = qla2x00_mailbox_command(vha, mcp);
2567 /* Return mailbox statuses. */
2568 if (mb_ret != NULL) {
2569 mb_ret[0] = mcp->mb[0];
2570 mb_ret[1] = mcp->mb[1];
2571 mb_ret[6] = mcp->mb[6];
2572 mb_ret[7] = mcp->mb[7];
2575 if (rval != QLA_SUCCESS) {
2576 /* AV tmp code: the main mailbox_command function should eventually
2577 * be changed to return ok even when the mailbox completion value is
2578 * not SUCCESS. Until then the caller is responsible for interpreting
2579 * the return values of this mailbox command, which avoids changing
2580 * too much of the existing code.
 */
2582 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2583 rval = QLA_SUCCESS;
2585 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2586 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2587 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2588 } else {
2589 /*EMPTY*/
2590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2591 "Done %s.\n", __func__);
2594 return (rval);
int
2598 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2599 uint8_t area, uint8_t al_pa)
{
2601 int rval;
2602 struct logio_entry_24xx *lg;
2603 dma_addr_t lg_dma;
2604 struct qla_hw_data *ha = vha->hw;
2605 struct req_que *req;
2607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2608 "Entered %s.\n", __func__);
2610 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2611 if (lg == NULL) {
2612 ql_log(ql_log_warn, vha, 0x106e,
2613 "Failed to allocate logout IOCB.\n");
2614 return QLA_MEMORY_ALLOC_FAILED;
2617 req = vha->req;
2618 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2619 lg->entry_count = 1;
2620 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2621 lg->nport_handle = cpu_to_le16(loop_id);
2622 lg->control_flags =
2623 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2624 LCF_FREE_NPORT);
2625 lg->port_id[0] = al_pa;
2626 lg->port_id[1] = area;
2627 lg->port_id[2] = domain;
2628 lg->vp_index = vha->vp_idx;
2629 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2630 (ha->r_a_tov / 10 * 2) + 2);
2631 if (rval != QLA_SUCCESS) {
2632 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2633 "Failed to issue logout IOCB (%x).\n", rval);
2634 } else if (lg->entry_status != 0) {
2635 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2636 "Failed to complete IOCB -- error status (%x).\n",
2637 lg->entry_status);
2638 rval = QLA_FUNCTION_FAILED;
2639 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2640 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2641 "Failed to complete IOCB -- completion status (%x) "
2642 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2643 le32_to_cpu(lg->io_parameter[0]),
2644 le32_to_cpu(lg->io_parameter[1]));
2645 } else {
2646 /*EMPTY*/
2647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2648 "Done %s.\n", __func__);
2651 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2653 return rval;
/*
2657 * qla2x00_fabric_logout
2658 * Issue logout fabric port mailbox command.
2660 * Input:
2661 * ha = adapter block pointer.
2662 * loop_id = device loop ID.
2663 * TARGET_QUEUE_LOCK must be released.
2664 * ADAPTER_STATE_LOCK must be released.
2666 * Returns:
2667 * qla2x00 local function return status code.
2669 * Context:
2670 * Kernel context.
 */
int
2673 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2674 uint8_t area, uint8_t al_pa)
{
2676 int rval;
2677 mbx_cmd_t mc;
2678 mbx_cmd_t *mcp = &mc;
2680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2681 "Entered %s.\n", __func__);
2683 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2684 mcp->out_mb = MBX_1|MBX_0;
2685 if (HAS_EXTENDED_IDS(vha->hw)) {
2686 mcp->mb[1] = loop_id;
2687 mcp->mb[10] = 0;
2688 mcp->out_mb |= MBX_10;
2689 } else {
2690 mcp->mb[1] = loop_id << 8;
2693 mcp->in_mb = MBX_1|MBX_0;
2694 mcp->tov = MBX_TOV_SECONDS;
2695 mcp->flags = 0;
2696 rval = qla2x00_mailbox_command(vha, mcp);
2698 if (rval != QLA_SUCCESS) {
2699 /*EMPTY*/
2700 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2701 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2702 } else {
2703 /*EMPTY*/
2704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2705 "Done %s.\n", __func__);
2708 return rval;
/*
2712 * qla2x00_full_login_lip
2713 * Issue full login LIP mailbox command.
2715 * Input:
2716 * ha = adapter block pointer.
2717 * TARGET_QUEUE_LOCK must be released.
2718 * ADAPTER_STATE_LOCK must be released.
2720 * Returns:
2721 * qla2x00 local function return status code.
2723 * Context:
2724 * Kernel context.
 */
int
2727 qla2x00_full_login_lip(scsi_qla_host_t *vha)
{
2729 int rval;
2730 mbx_cmd_t mc;
2731 mbx_cmd_t *mcp = &mc;
2733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2734 "Entered %s.\n", __func__);
2736 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2737 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2738 mcp->mb[2] = 0;
2739 mcp->mb[3] = 0;
2740 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2741 mcp->in_mb = MBX_0;
2742 mcp->tov = MBX_TOV_SECONDS;
2743 mcp->flags = 0;
2744 rval = qla2x00_mailbox_command(vha, mcp);
2746 if (rval != QLA_SUCCESS) {
2747 /*EMPTY*/
2748 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2749 } else {
2750 /*EMPTY*/
2751 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2752 "Done %s.\n", __func__);
2755 return rval;
/*
2759 * qla2x00_get_id_list
2761 * Input:
2762 * ha = adapter block pointer.
2764 * Returns:
2765 * qla2x00 local function return status code.
2767 * Context:
2768 * Kernel context.
 */
int
2771 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2772 uint16_t *entries)
{
2774 int rval;
2775 mbx_cmd_t mc;
2776 mbx_cmd_t *mcp = &mc;
2778 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2779 "Entered %s.\n", __func__);
2781 if (id_list == NULL)
2782 return QLA_FUNCTION_FAILED;
2784 mcp->mb[0] = MBC_GET_ID_LIST;
2785 mcp->out_mb = MBX_0;
2786 if (IS_FWI2_CAPABLE(vha->hw)) {
2787 mcp->mb[2] = MSW(id_list_dma);
2788 mcp->mb[3] = LSW(id_list_dma);
2789 mcp->mb[6] = MSW(MSD(id_list_dma));
2790 mcp->mb[7] = LSW(MSD(id_list_dma));
2791 mcp->mb[8] = 0;
2792 mcp->mb[9] = vha->vp_idx;
2793 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2794 } else {
2795 mcp->mb[1] = MSW(id_list_dma);
2796 mcp->mb[2] = LSW(id_list_dma);
2797 mcp->mb[3] = MSW(MSD(id_list_dma));
2798 mcp->mb[6] = LSW(MSD(id_list_dma));
2799 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2801 mcp->in_mb = MBX_1|MBX_0;
2802 mcp->tov = MBX_TOV_SECONDS;
2803 mcp->flags = 0;
2804 rval = qla2x00_mailbox_command(vha, mcp);
2806 if (rval != QLA_SUCCESS) {
2807 /*EMPTY*/
2808 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2809 } else {
2810 *entries = mcp->mb[1];
2811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2812 "Done %s.\n", __func__);
2815 return rval;
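/*
 * Usage sketch (illustrative; "entries" is a hypothetical local, the
 * gid_list buffer is the one the driver already keeps in qla_hw_data):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *
 * On success, "entries" gid_list_info records can then be walked starting
 * at ha->gid_list, which is roughly how the local-loop scan consumes it.
 */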
/*
2819 * qla2x00_get_resource_cnts
2820 * Get current firmware resource counts.
2822 * Input:
2823 * ha = adapter block pointer.
2825 * Returns:
2826 * qla2x00 local function return status code.
2828 * Context:
2829 * Kernel context.
 */
int
2832 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
{
2834 struct qla_hw_data *ha = vha->hw;
2835 int rval;
2836 mbx_cmd_t mc;
2837 mbx_cmd_t *mcp = &mc;
2839 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2840 "Entered %s.\n", __func__);
2842 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2843 mcp->out_mb = MBX_0;
2844 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2845 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2846 mcp->in_mb |= MBX_12;
2847 mcp->tov = MBX_TOV_SECONDS;
2848 mcp->flags = 0;
2849 rval = qla2x00_mailbox_command(vha, mcp);
2851 if (rval != QLA_SUCCESS) {
2852 /*EMPTY*/
2853 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2854 "Failed mb[0]=%x.\n", mcp->mb[0]);
2855 } else {
2856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2857 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2858 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2859 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2860 mcp->mb[11], mcp->mb[12]);
2862 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2863 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2864 ha->cur_fw_xcb_count = mcp->mb[3];
2865 ha->orig_fw_xcb_count = mcp->mb[6];
2866 ha->cur_fw_iocb_count = mcp->mb[7];
2867 ha->orig_fw_iocb_count = mcp->mb[10];
2868 if (ha->flags.npiv_supported)
2869 ha->max_npiv_vports = mcp->mb[11];
2870 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2871 ha->fw_max_fcf_count = mcp->mb[12];
2874 return (rval);
/*
2878 * qla2x00_get_fcal_position_map
2879 * Get FCAL (LILP) position map using mailbox command
2881 * Input:
2882 * ha = adapter state pointer.
2883 * pos_map = buffer pointer (can be NULL).
2885 * Returns:
2886 * qla2x00 local function return status code.
2888 * Context:
2889 * Kernel context.
 */
int
2892 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
2894 int rval;
2895 mbx_cmd_t mc;
2896 mbx_cmd_t *mcp = &mc;
2897 char *pmap;
2898 dma_addr_t pmap_dma;
2899 struct qla_hw_data *ha = vha->hw;
2901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2902 "Entered %s.\n", __func__);
2904 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2905 if (pmap == NULL) {
2906 ql_log(ql_log_warn, vha, 0x1080,
2907 "Memory alloc failed.\n");
2908 return QLA_MEMORY_ALLOC_FAILED;
2911 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2912 mcp->mb[2] = MSW(pmap_dma);
2913 mcp->mb[3] = LSW(pmap_dma);
2914 mcp->mb[6] = MSW(MSD(pmap_dma));
2915 mcp->mb[7] = LSW(MSD(pmap_dma));
2916 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2917 mcp->in_mb = MBX_1|MBX_0;
2918 mcp->buf_size = FCAL_MAP_SIZE;
2919 mcp->flags = MBX_DMA_IN;
2920 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2921 rval = qla2x00_mailbox_command(vha, mcp);
2923 if (rval == QLA_SUCCESS) {
2924 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2925 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2926 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2927 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2928 pmap, pmap[0] + 1);
2930 if (pos_map)
2931 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2933 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2935 if (rval != QLA_SUCCESS) {
2936 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2937 } else {
2938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2939 "Done %s.\n", __func__);
2942 return rval;
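/*
 * Note: the first byte of the returned map holds the number of valid
 * AL_PA entries that follow, which is why the debug dump above prints
 * pmap[0] + 1 bytes and why FCAL_MAP_SIZE bounds the copy to pos_map.
 */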
/*
2946 * qla2x00_get_link_status
2948 * Input:
2949 * ha = adapter block pointer.
2950 * loop_id = device loop ID.
2951 * ret_buf = pointer to link status return buffer.
2953 * Returns:
2954 * 0 = success.
2955 * BIT_0 = mem alloc error.
2956 * BIT_1 = mailbox error.
 */
int
2959 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2960 struct link_statistics *stats, dma_addr_t stats_dma)
{
2962 int rval;
2963 mbx_cmd_t mc;
2964 mbx_cmd_t *mcp = &mc;
2965 uint32_t *iter = (void *)stats;
2966 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
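/*
 * Only the counters that precede link_up_cnt in struct link_statistics
 * are byte-swapped below, presumably the portion this legacy mailbox
 * command fills in; qla24xx_get_isp_stats() further down converts the
 * whole structure instead.
 */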
2967 struct qla_hw_data *ha = vha->hw;
2969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2970 "Entered %s.\n", __func__);
2972 mcp->mb[0] = MBC_GET_LINK_STATUS;
2973 mcp->mb[2] = MSW(LSD(stats_dma));
2974 mcp->mb[3] = LSW(LSD(stats_dma));
2975 mcp->mb[6] = MSW(MSD(stats_dma));
2976 mcp->mb[7] = LSW(MSD(stats_dma));
2977 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2978 mcp->in_mb = MBX_0;
2979 if (IS_FWI2_CAPABLE(ha)) {
2980 mcp->mb[1] = loop_id;
2981 mcp->mb[4] = 0;
2982 mcp->mb[10] = 0;
2983 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2984 mcp->in_mb |= MBX_1;
2985 } else if (HAS_EXTENDED_IDS(ha)) {
2986 mcp->mb[1] = loop_id;
2987 mcp->mb[10] = 0;
2988 mcp->out_mb |= MBX_10|MBX_1;
2989 } else {
2990 mcp->mb[1] = loop_id << 8;
2991 mcp->out_mb |= MBX_1;
2993 mcp->tov = MBX_TOV_SECONDS;
2994 mcp->flags = IOCTL_CMD;
2995 rval = qla2x00_mailbox_command(vha, mcp);
2997 if (rval == QLA_SUCCESS) {
2998 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2999 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3000 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3001 rval = QLA_FUNCTION_FAILED;
3002 } else {
3003 /* Re-endianize - firmware data is le32. */
3004 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3005 "Done %s.\n", __func__);
3006 for ( ; dwords--; iter++)
3007 le32_to_cpus(iter);
3009 } else {
3010 /* Failed. */
3011 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3014 return rval;
3018 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3019 dma_addr_t stats_dma, uint16_t options)
3021 int rval;
3022 mbx_cmd_t mc;
3023 mbx_cmd_t *mcp = &mc;
3024 uint32_t *iter, dwords;
3026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3027 "Entered %s.\n", __func__);
3029 memset(&mc, 0, sizeof(mc));
3030 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3031 mc.mb[2] = MSW(stats_dma);
3032 mc.mb[3] = LSW(stats_dma);
3033 mc.mb[6] = MSW(MSD(stats_dma));
3034 mc.mb[7] = LSW(MSD(stats_dma));
3035 mc.mb[8] = sizeof(struct link_statistics) / 4;
3036 mc.mb[9] = cpu_to_le16(vha->vp_idx);
3037 mc.mb[10] = cpu_to_le16(options);
3039 rval = qla24xx_send_mb_cmd(vha, &mc);
3041 if (rval == QLA_SUCCESS) {
3042 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3043 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3044 "Failed mb[0]=%x.\n", mcp->mb[0]);
3045 rval = QLA_FUNCTION_FAILED;
3046 } else {
3047 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3048 "Done %s.\n", __func__);
3049 /* Re-endianize - firmware data is le32. */
3050 dwords = sizeof(struct link_statistics) / 4;
3051 iter = &stats->link_fail_cnt;
3052 for ( ; dwords--; iter++)
3053 le32_to_cpus(iter);
3055 } else {
3056 /* Failed. */
3057 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3060 return rval;
3064 qla24xx_abort_command(srb_t *sp)
3066 int rval;
3067 unsigned long flags = 0;
3069 struct abort_entry_24xx *abt;
3070 dma_addr_t abt_dma;
3071 uint32_t handle;
3072 fc_port_t *fcport = sp->fcport;
3073 struct scsi_qla_host *vha = fcport->vha;
3074 struct qla_hw_data *ha = vha->hw;
3075 struct req_que *req = vha->req;
3077 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3078 "Entered %s.\n", __func__);
3080 if (sp->qpair)
3081 req = sp->qpair->req;
3083 if (ql2xasynctmfenable)
3084 return qla24xx_async_abort_command(sp);
3086 spin_lock_irqsave(&ha->hardware_lock, flags);
3087 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3088 if (req->outstanding_cmds[handle] == sp)
3089 break;
3091 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3092 if (handle == req->num_outstanding_cmds) {
3093 /* Command not found. */
3094 return QLA_FUNCTION_FAILED;
3097 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3098 if (abt == NULL) {
3099 ql_log(ql_log_warn, vha, 0x108d,
3100 "Failed to allocate abort IOCB.\n");
3101 return QLA_MEMORY_ALLOC_FAILED;
3104 abt->entry_type = ABORT_IOCB_TYPE;
3105 abt->entry_count = 1;
3106 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3107 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3108 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3109 abt->port_id[0] = fcport->d_id.b.al_pa;
3110 abt->port_id[1] = fcport->d_id.b.area;
3111 abt->port_id[2] = fcport->d_id.b.domain;
3112 abt->vp_index = fcport->vha->vp_idx;
3114 abt->req_que_no = cpu_to_le16(req->id);
3116 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3117 if (rval != QLA_SUCCESS) {
3118 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3119 "Failed to issue IOCB (%x).\n", rval);
3120 } else if (abt->entry_status != 0) {
3121 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3122 "Failed to complete IOCB -- error status (%x).\n",
3123 abt->entry_status);
3124 rval = QLA_FUNCTION_FAILED;
3125 } else if (abt->nport_handle != cpu_to_le16(0)) {
3126 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3127 "Failed to complete IOCB -- completion status (%x).\n",
3128 le16_to_cpu(abt->nport_handle));
3129 if (abt->nport_handle == CS_IOCB_ERROR)
3130 rval = QLA_FUNCTION_PARAMETER_ERROR;
3131 else
3132 rval = QLA_FUNCTION_FAILED;
3133 } else {
3134 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3135 "Done %s.\n", __func__);
3138 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3140 return rval;
3143 struct tsk_mgmt_cmd {
3144 union {
3145 struct tsk_mgmt_entry tsk;
3146 struct sts_entry_24xx sts;
3147 } p;
3150 static int
3151 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3152 uint64_t l, int tag)
3154 int rval, rval2;
3155 struct tsk_mgmt_cmd *tsk;
3156 struct sts_entry_24xx *sts;
3157 dma_addr_t tsk_dma;
3158 scsi_qla_host_t *vha;
3159 struct qla_hw_data *ha;
3160 struct req_que *req;
3161 struct rsp_que *rsp;
3162 struct qla_qpair *qpair;
3164 vha = fcport->vha;
3165 ha = vha->hw;
3166 req = vha->req;
3168 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3169 "Entered %s.\n", __func__);
3171 if (vha->vp_idx && vha->qpair) {
3172 /* NPIV port */
3173 qpair = vha->qpair;
3174 rsp = qpair->rsp;
3175 req = qpair->req;
3176 } else {
3177 rsp = req->rsp;
3180 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3181 if (tsk == NULL) {
3182 ql_log(ql_log_warn, vha, 0x1093,
3183 "Failed to allocate task management IOCB.\n");
3184 return QLA_MEMORY_ALLOC_FAILED;
3187 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3188 tsk->p.tsk.entry_count = 1;
3189 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3190 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3191 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3192 tsk->p.tsk.control_flags = cpu_to_le32(type);
3193 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3194 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3195 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3196 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3197 if (type == TCF_LUN_RESET) {
3198 int_to_scsilun(l, &tsk->p.tsk.lun);
3199 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3200 sizeof(tsk->p.tsk.lun));
3203 sts = &tsk->p.sts;
3204 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3205 if (rval != QLA_SUCCESS) {
3206 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3207 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3208 } else if (sts->entry_status != 0) {
3209 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3210 "Failed to complete IOCB -- error status (%x).\n",
3211 sts->entry_status);
3212 rval = QLA_FUNCTION_FAILED;
3213 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3214 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3215 "Failed to complete IOCB -- completion status (%x).\n",
3216 le16_to_cpu(sts->comp_status));
3217 rval = QLA_FUNCTION_FAILED;
3218 } else if (le16_to_cpu(sts->scsi_status) &
3219 SS_RESPONSE_INFO_LEN_VALID) {
3220 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3222 "Ignoring inconsistent data length -- not enough "
3223 "response info (%d).\n",
3224 le32_to_cpu(sts->rsp_data_len));
3225 } else if (sts->data[3]) {
3226 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3227 "Failed to complete IOCB -- response (%x).\n",
3228 sts->data[3]);
3229 rval = QLA_FUNCTION_FAILED;
3233 /* Issue marker IOCB. */
3234 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3235 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3236 if (rval2 != QLA_SUCCESS) {
3237 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3238 "Failed to issue marker IOCB (%x).\n", rval2);
3239 } else {
3240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3241 "Done %s.\n", __func__);
3244 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3246 return rval;
3250 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3252 struct qla_hw_data *ha = fcport->vha->hw;
3254 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3255 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3257 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3261 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3263 struct qla_hw_data *ha = fcport->vha->hw;
3265 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3266 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3268 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3272 qla2x00_system_error(scsi_qla_host_t *vha)
3274 int rval;
3275 mbx_cmd_t mc;
3276 mbx_cmd_t *mcp = &mc;
3277 struct qla_hw_data *ha = vha->hw;
3279 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3280 return QLA_FUNCTION_FAILED;
3282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3283 "Entered %s.\n", __func__);
3285 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3286 mcp->out_mb = MBX_0;
3287 mcp->in_mb = MBX_0;
3288 mcp->tov = 5;
3289 mcp->flags = 0;
3290 rval = qla2x00_mailbox_command(vha, mcp);
3292 if (rval != QLA_SUCCESS) {
3293 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3294 } else {
3295 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3296 "Done %s.\n", __func__);
3299 return rval;
3303 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3305 int rval;
3306 mbx_cmd_t mc;
3307 mbx_cmd_t *mcp = &mc;
3309 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3310 !IS_QLA27XX(vha->hw))
3311 return QLA_FUNCTION_FAILED;
3313 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3314 "Entered %s.\n", __func__);
3316 mcp->mb[0] = MBC_WRITE_SERDES;
3317 mcp->mb[1] = addr;
3318 if (IS_QLA2031(vha->hw))
3319 mcp->mb[2] = data & 0xff;
3320 else
3321 mcp->mb[2] = data;
3323 mcp->mb[3] = 0;
3324 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3325 mcp->in_mb = MBX_0;
3326 mcp->tov = MBX_TOV_SECONDS;
3327 mcp->flags = 0;
3328 rval = qla2x00_mailbox_command(vha, mcp);
3330 if (rval != QLA_SUCCESS) {
3331 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3332 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3333 } else {
3334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3335 "Done %s.\n", __func__);
3338 return rval;
3342 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3344 int rval;
3345 mbx_cmd_t mc;
3346 mbx_cmd_t *mcp = &mc;
3348 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3349 !IS_QLA27XX(vha->hw))
3350 return QLA_FUNCTION_FAILED;
3352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3353 "Entered %s.\n", __func__);
3355 mcp->mb[0] = MBC_READ_SERDES;
3356 mcp->mb[1] = addr;
3357 mcp->mb[3] = 0;
3358 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3359 mcp->in_mb = MBX_1|MBX_0;
3360 mcp->tov = MBX_TOV_SECONDS;
3361 mcp->flags = 0;
3362 rval = qla2x00_mailbox_command(vha, mcp);
3364 if (IS_QLA2031(vha->hw))
3365 *data = mcp->mb[1] & 0xff;
3366 else
3367 *data = mcp->mb[1];
3369 if (rval != QLA_SUCCESS) {
3370 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3371 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3372 } else {
3373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3374 "Done %s.\n", __func__);
3377 return rval;
3381 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3383 int rval;
3384 mbx_cmd_t mc;
3385 mbx_cmd_t *mcp = &mc;
3387 if (!IS_QLA8044(vha->hw))
3388 return QLA_FUNCTION_FAILED;
3390 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3391 "Entered %s.\n", __func__);
3393 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3394 mcp->mb[1] = HCS_WRITE_SERDES;
3395 mcp->mb[3] = LSW(addr);
3396 mcp->mb[4] = MSW(addr);
3397 mcp->mb[5] = LSW(data);
3398 mcp->mb[6] = MSW(data);
3399 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3400 mcp->in_mb = MBX_0;
3401 mcp->tov = MBX_TOV_SECONDS;
3402 mcp->flags = 0;
3403 rval = qla2x00_mailbox_command(vha, mcp);
3405 if (rval != QLA_SUCCESS) {
3406 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3407 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3408 } else {
3409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3410 "Done %s.\n", __func__);
3413 return rval;
3417 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3419 int rval;
3420 mbx_cmd_t mc;
3421 mbx_cmd_t *mcp = &mc;
3423 if (!IS_QLA8044(vha->hw))
3424 return QLA_FUNCTION_FAILED;
3426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3427 "Entered %s.\n", __func__);
3429 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3430 mcp->mb[1] = HCS_READ_SERDES;
3431 mcp->mb[3] = LSW(addr);
3432 mcp->mb[4] = MSW(addr);
3433 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3434 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3435 mcp->tov = MBX_TOV_SECONDS;
3436 mcp->flags = 0;
3437 rval = qla2x00_mailbox_command(vha, mcp);
3439 *data = mcp->mb[2] << 16 | mcp->mb[1];
3441 if (rval != QLA_SUCCESS) {
3442 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3443 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3444 } else {
3445 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3446 "Done %s.\n", __func__);
3449 return rval;
/*
3453 * qla2x00_set_serdes_params() -
3454 * @vha: HA context
3455 * @sw_em_1g:
3456 * @sw_em_2g:
3457 * @sw_em_4g:
3459 * Returns
 */
int
3462 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3463 uint16_t sw_em_2g, uint16_t sw_em_4g)
{
3465 int rval;
3466 mbx_cmd_t mc;
3467 mbx_cmd_t *mcp = &mc;
3469 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3470 "Entered %s.\n", __func__);
3472 mcp->mb[0] = MBC_SERDES_PARAMS;
3473 mcp->mb[1] = BIT_0;
3474 mcp->mb[2] = sw_em_1g | BIT_15;
3475 mcp->mb[3] = sw_em_2g | BIT_15;
3476 mcp->mb[4] = sw_em_4g | BIT_15;
3477 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3478 mcp->in_mb = MBX_0;
3479 mcp->tov = MBX_TOV_SECONDS;
3480 mcp->flags = 0;
3481 rval = qla2x00_mailbox_command(vha, mcp);
3483 if (rval != QLA_SUCCESS) {
3484 /*EMPTY*/
3485 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3486 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3487 } else {
3488 /*EMPTY*/
3489 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3490 "Done %s.\n", __func__);
3493 return rval;
3497 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3499 int rval;
3500 mbx_cmd_t mc;
3501 mbx_cmd_t *mcp = &mc;
3503 if (!IS_FWI2_CAPABLE(vha->hw))
3504 return QLA_FUNCTION_FAILED;
3506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3507 "Entered %s.\n", __func__);
3509 mcp->mb[0] = MBC_STOP_FIRMWARE;
3510 mcp->mb[1] = 0;
3511 mcp->out_mb = MBX_1|MBX_0;
3512 mcp->in_mb = MBX_0;
3513 mcp->tov = 5;
3514 mcp->flags = 0;
3515 rval = qla2x00_mailbox_command(vha, mcp);
3517 if (rval != QLA_SUCCESS) {
3518 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3519 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3520 rval = QLA_INVALID_COMMAND;
3521 } else {
3522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3523 "Done %s.\n", __func__);
3526 return rval;
3530 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3531 uint16_t buffers)
3533 int rval;
3534 mbx_cmd_t mc;
3535 mbx_cmd_t *mcp = &mc;
3537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3538 "Entered %s.\n", __func__);
3540 if (!IS_FWI2_CAPABLE(vha->hw))
3541 return QLA_FUNCTION_FAILED;
3543 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3544 return QLA_FUNCTION_FAILED;
3546 mcp->mb[0] = MBC_TRACE_CONTROL;
3547 mcp->mb[1] = TC_EFT_ENABLE;
3548 mcp->mb[2] = LSW(eft_dma);
3549 mcp->mb[3] = MSW(eft_dma);
3550 mcp->mb[4] = LSW(MSD(eft_dma));
3551 mcp->mb[5] = MSW(MSD(eft_dma));
3552 mcp->mb[6] = buffers;
3553 mcp->mb[7] = TC_AEN_DISABLE;
3554 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3555 mcp->in_mb = MBX_1|MBX_0;
3556 mcp->tov = MBX_TOV_SECONDS;
3557 mcp->flags = 0;
3558 rval = qla2x00_mailbox_command(vha, mcp);
3559 if (rval != QLA_SUCCESS) {
3560 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3561 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3562 rval, mcp->mb[0], mcp->mb[1]);
3563 } else {
3564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3565 "Done %s.\n", __func__);
3568 return rval;
3572 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3574 int rval;
3575 mbx_cmd_t mc;
3576 mbx_cmd_t *mcp = &mc;
3578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3579 "Entered %s.\n", __func__);
3581 if (!IS_FWI2_CAPABLE(vha->hw))
3582 return QLA_FUNCTION_FAILED;
3584 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3585 return QLA_FUNCTION_FAILED;
3587 mcp->mb[0] = MBC_TRACE_CONTROL;
3588 mcp->mb[1] = TC_EFT_DISABLE;
3589 mcp->out_mb = MBX_1|MBX_0;
3590 mcp->in_mb = MBX_1|MBX_0;
3591 mcp->tov = MBX_TOV_SECONDS;
3592 mcp->flags = 0;
3593 rval = qla2x00_mailbox_command(vha, mcp);
3594 if (rval != QLA_SUCCESS) {
3595 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3596 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3597 rval, mcp->mb[0], mcp->mb[1]);
3598 } else {
3599 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3600 "Done %s.\n", __func__);
3603 return rval;
3607 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3608 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3610 int rval;
3611 mbx_cmd_t mc;
3612 mbx_cmd_t *mcp = &mc;
3614 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3615 "Entered %s.\n", __func__);
3617 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3618 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3619 return QLA_FUNCTION_FAILED;
3621 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3622 return QLA_FUNCTION_FAILED;
3624 mcp->mb[0] = MBC_TRACE_CONTROL;
3625 mcp->mb[1] = TC_FCE_ENABLE;
3626 mcp->mb[2] = LSW(fce_dma);
3627 mcp->mb[3] = MSW(fce_dma);
3628 mcp->mb[4] = LSW(MSD(fce_dma));
3629 mcp->mb[5] = MSW(MSD(fce_dma));
3630 mcp->mb[6] = buffers;
3631 mcp->mb[7] = TC_AEN_DISABLE;
3632 mcp->mb[8] = 0;
3633 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3634 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3635 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3636 MBX_1|MBX_0;
3637 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3638 mcp->tov = MBX_TOV_SECONDS;
3639 mcp->flags = 0;
3640 rval = qla2x00_mailbox_command(vha, mcp);
3641 if (rval != QLA_SUCCESS) {
3642 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3643 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3644 rval, mcp->mb[0], mcp->mb[1]);
3645 } else {
3646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3647 "Done %s.\n", __func__);
3649 if (mb)
3650 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3651 if (dwords)
3652 *dwords = buffers;
3655 return rval;
3659 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3661 int rval;
3662 mbx_cmd_t mc;
3663 mbx_cmd_t *mcp = &mc;
3665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3666 "Entered %s.\n", __func__);
3668 if (!IS_FWI2_CAPABLE(vha->hw))
3669 return QLA_FUNCTION_FAILED;
3671 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3672 return QLA_FUNCTION_FAILED;
3674 mcp->mb[0] = MBC_TRACE_CONTROL;
3675 mcp->mb[1] = TC_FCE_DISABLE;
3676 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3677 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3678 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3679 MBX_1|MBX_0;
3680 mcp->tov = MBX_TOV_SECONDS;
3681 mcp->flags = 0;
3682 rval = qla2x00_mailbox_command(vha, mcp);
3683 if (rval != QLA_SUCCESS) {
3684 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3685 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3686 rval, mcp->mb[0], mcp->mb[1]);
3687 } else {
3688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3689 "Done %s.\n", __func__);
3691 if (wr)
3692 *wr = (uint64_t) mcp->mb[5] << 48 |
3693 (uint64_t) mcp->mb[4] << 32 |
3694 (uint64_t) mcp->mb[3] << 16 |
3695 (uint64_t) mcp->mb[2];
3696 if (rd)
3697 *rd = (uint64_t) mcp->mb[9] << 48 |
3698 (uint64_t) mcp->mb[8] << 32 |
3699 (uint64_t) mcp->mb[7] << 16 |
3700 (uint64_t) mcp->mb[6];
3703 return rval;
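/*
 * Note: the write and read pointers above come back as four 16-bit
 * mailbox registers each, least-significant word first. For example,
 * mb[2..5] = 0x1000, 0x0000, 0x0002, 0x0000 reassembles to a write
 * pointer of 0x0000000200001000.
 */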
3707 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3708 uint16_t *port_speed, uint16_t *mb)
3710 int rval;
3711 mbx_cmd_t mc;
3712 mbx_cmd_t *mcp = &mc;
3714 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3715 "Entered %s.\n", __func__);
3717 if (!IS_IIDMA_CAPABLE(vha->hw))
3718 return QLA_FUNCTION_FAILED;
3720 mcp->mb[0] = MBC_PORT_PARAMS;
3721 mcp->mb[1] = loop_id;
3722 mcp->mb[2] = mcp->mb[3] = 0;
3723 mcp->mb[9] = vha->vp_idx;
3724 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3725 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3726 mcp->tov = MBX_TOV_SECONDS;
3727 mcp->flags = 0;
3728 rval = qla2x00_mailbox_command(vha, mcp);
3730 /* Return mailbox statuses. */
3731 if (mb != NULL) {
3732 mb[0] = mcp->mb[0];
3733 mb[1] = mcp->mb[1];
3734 mb[3] = mcp->mb[3];
3737 if (rval != QLA_SUCCESS) {
3738 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3739 } else {
3740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3741 "Done %s.\n", __func__);
3742 if (port_speed)
3743 *port_speed = mcp->mb[3];
3746 return rval;
3750 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3751 uint16_t port_speed, uint16_t *mb)
3753 int rval;
3754 mbx_cmd_t mc;
3755 mbx_cmd_t *mcp = &mc;
3757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3758 "Entered %s.\n", __func__);
3760 if (!IS_IIDMA_CAPABLE(vha->hw))
3761 return QLA_FUNCTION_FAILED;
3763 mcp->mb[0] = MBC_PORT_PARAMS;
3764 mcp->mb[1] = loop_id;
3765 mcp->mb[2] = BIT_0;
3766 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3767 mcp->mb[9] = vha->vp_idx;
3768 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3769 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3770 mcp->tov = MBX_TOV_SECONDS;
3771 mcp->flags = 0;
3772 rval = qla2x00_mailbox_command(vha, mcp);
3774 /* Return mailbox statuses. */
3775 if (mb != NULL) {
3776 mb[0] = mcp->mb[0];
3777 mb[1] = mcp->mb[1];
3778 mb[3] = mcp->mb[3];
3781 if (rval != QLA_SUCCESS) {
3782 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3783 "Failed=%x.\n", rval);
3784 } else {
3785 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3786 "Done %s.\n", __func__);
3789 return rval;
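/*
 * Usage sketch (illustrative; this mirrors how the driver's iIDMA path
 * applies the speed reported for a fabric port):
 *
 *	uint16_t mb[MAILBOX_REGISTER_COUNT];
 *
 *	rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
 *	    fcport->fp_speed, mb);
 */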
3792 void
3793 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3794 struct vp_rpt_id_entry_24xx *rptid_entry)
3796 struct qla_hw_data *ha = vha->hw;
3797 scsi_qla_host_t *vp = NULL;
3798 unsigned long flags;
3799 int found;
3800 port_id_t id;
3801 struct fc_port *fcport;
3803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3804 "Entered %s.\n", __func__);
3806 if (rptid_entry->entry_status != 0)
3807 return;
3809 id.b.domain = rptid_entry->port_id[2];
3810 id.b.area = rptid_entry->port_id[1];
3811 id.b.al_pa = rptid_entry->port_id[0];
3812 id.b.rsvd_1 = 0;
3813 ha->flags.n2n_ae = 0;
3815 if (rptid_entry->format == 0) {
3816 /* loop */
3817 ql_dbg(ql_dbg_async, vha, 0x10b7,
3818 "Format 0 : Number of VPs setup %d, number of "
3819 "VPs acquired %d.\n", rptid_entry->vp_setup,
3820 rptid_entry->vp_acquired);
3821 ql_dbg(ql_dbg_async, vha, 0x10b8,
3822 "Primary port id %02x%02x%02x.\n",
3823 rptid_entry->port_id[2], rptid_entry->port_id[1],
3824 rptid_entry->port_id[0]);
3825 ha->current_topology = ISP_CFG_NL;
3826 qlt_update_host_map(vha, id);
3828 } else if (rptid_entry->format == 1) {
3829 /* fabric */
3830 ql_dbg(ql_dbg_async, vha, 0x10b9,
3831 "Format 1: VP[%d] enabled - status %d - with "
3832 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3833 rptid_entry->vp_status,
3834 rptid_entry->port_id[2], rptid_entry->port_id[1],
3835 rptid_entry->port_id[0]);
3836 ql_dbg(ql_dbg_async, vha, 0x5075,
3837 "Format 1: Remote WWPN %8phC.\n",
3838 rptid_entry->u.f1.port_name);
3840 ql_dbg(ql_dbg_async, vha, 0x5075,
3841 "Format 1: WWPN %8phC.\n",
3842 vha->port_name);
3844 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3845 case TOPO_N2N:
3846 ha->current_topology = ISP_CFG_N;
3847 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3848 fcport = qla2x00_find_fcport_by_wwpn(vha,
3849 rptid_entry->u.f1.port_name, 1);
3850 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3852 if (fcport) {
3853 fcport->plogi_nack_done_deadline = jiffies + HZ;
3854 fcport->dm_login_expire = jiffies + 3*HZ;
3855 fcport->scan_state = QLA_FCPORT_FOUND;
3856 switch (fcport->disc_state) {
3857 case DSC_DELETED:
3858 set_bit(RELOGIN_NEEDED,
3859 &vha->dpc_flags);
3860 break;
3861 case DSC_DELETE_PEND:
3862 break;
3863 default:
3864 qlt_schedule_sess_for_deletion(fcport);
3865 break;
3867 } else {
3868 id.b24 = 0;
3869 if (wwn_to_u64(vha->port_name) >
3870 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3871 vha->d_id.b24 = 0;
3872 vha->d_id.b.al_pa = 1;
3873 ha->flags.n2n_bigger = 1;
3874 ha->flags.n2n_ae = 0;
3876 id.b.al_pa = 2;
3877 ql_dbg(ql_dbg_async, vha, 0x5075,
3878 "Format 1: assign local id %x remote id %x\n",
3879 vha->d_id.b24, id.b24);
3880 } else {
3881 ql_dbg(ql_dbg_async, vha, 0x5075,
3882 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3883 rptid_entry->u.f1.port_name);
3884 ha->flags.n2n_bigger = 0;
3885 ha->flags.n2n_ae = 1;
3887 qla24xx_post_newsess_work(vha, &id,
3888 rptid_entry->u.f1.port_name,
3889 rptid_entry->u.f1.node_name,
3890 NULL,
3891 FC4_TYPE_UNKNOWN);
3894 /* If our port name is higher, initiate the N2N login. */
3896 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3897 return;
3898 break;
3899 case TOPO_FL:
3900 ha->current_topology = ISP_CFG_FL;
3901 break;
3902 case TOPO_F:
3903 ha->current_topology = ISP_CFG_F;
3904 break;
3905 default:
3906 break;
3909 ha->flags.gpsc_supported = 1;
3910 ha->current_topology = ISP_CFG_F;
3911 /* buffer to buffer credit flag */
3912 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3914 if (rptid_entry->vp_idx == 0) {
3915 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3916 /* FA-WWN is only for physical port */
3917 if (qla_ini_mode_enabled(vha) &&
3918 ha->flags.fawwpn_enabled &&
3919 (rptid_entry->u.f1.flags &
3920 BIT_6)) {
3921 memcpy(vha->port_name,
3922 rptid_entry->u.f1.port_name,
3923 WWN_SIZE);
3926 qlt_update_host_map(vha, id);
3929 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3930 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3931 } else {
3932 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3933 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3934 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3935 "Could not acquire ID for VP[%d].\n",
3936 rptid_entry->vp_idx);
3937 return;
3940 found = 0;
3941 spin_lock_irqsave(&ha->vport_slock, flags);
3942 list_for_each_entry(vp, &ha->vp_list, list) {
3943 if (rptid_entry->vp_idx == vp->vp_idx) {
3944 found = 1;
3945 break;
3948 spin_unlock_irqrestore(&ha->vport_slock, flags);
3950 if (!found)
3951 return;
3953 qlt_update_host_map(vp, id);
/*
3956 * Cannot configure here as we are still sitting on the
3957 * response queue. Handle it in dpc context.
 */
3959 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3960 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3961 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3963 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3964 qla2xxx_wake_dpc(vha);
3965 } else if (rptid_entry->format == 2) {
3966 ql_dbg(ql_dbg_async, vha, 0x505f,
3967 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3968 rptid_entry->port_id[2], rptid_entry->port_id[1],
3969 rptid_entry->port_id[0]);
3971 ql_dbg(ql_dbg_async, vha, 0x5075,
3972 "N2N: Remote WWPN %8phC.\n",
3973 rptid_entry->u.f2.port_name);
3975 /* N2N. direct connect */
3976 ha->current_topology = ISP_CFG_N;
3977 ha->flags.rida_fmt2 = 1;
3978 vha->d_id.b.domain = rptid_entry->port_id[2];
3979 vha->d_id.b.area = rptid_entry->port_id[1];
3980 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3982 ha->flags.n2n_ae = 1;
3983 spin_lock_irqsave(&ha->vport_slock, flags);
3984 qlt_update_vp_map(vha, SET_AL_PA);
3985 spin_unlock_irqrestore(&ha->vport_slock, flags);
3987 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3988 fcport->scan_state = QLA_FCPORT_SCAN;
3991 fcport = qla2x00_find_fcport_by_wwpn(vha,
3992 rptid_entry->u.f2.port_name, 1);
3994 if (fcport) {
3995 fcport->login_retry = vha->hw->login_retry_count;
3996 fcport->plogi_nack_done_deadline = jiffies + HZ;
3997 fcport->scan_state = QLA_FCPORT_FOUND;
4003 * qla24xx_modify_vp_config
4004 * Change VP configuration for vha
4006 * Input:
4007 * vha = adapter block pointer.
4009 * Returns:
4010 * qla2xxx local function return status code.
4012 * Context:
4013 * Kernel context.
 */
int
4016 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
{
4018 int rval;
4019 struct vp_config_entry_24xx *vpmod;
4020 dma_addr_t vpmod_dma;
4021 struct qla_hw_data *ha = vha->hw;
4022 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4024 /* This can be called by the parent */
4026 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4027 "Entered %s.\n", __func__);
4029 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4030 if (!vpmod) {
4031 ql_log(ql_log_warn, vha, 0x10bc,
4032 "Failed to allocate modify VP IOCB.\n");
4033 return QLA_MEMORY_ALLOC_FAILED;
4036 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4037 vpmod->entry_count = 1;
4038 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4039 vpmod->vp_count = 1;
4040 vpmod->vp_index1 = vha->vp_idx;
4041 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4043 qlt_modify_vp_config(vha, vpmod);
4045 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4046 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4047 vpmod->entry_count = 1;
4049 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4050 if (rval != QLA_SUCCESS) {
4051 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4052 "Failed to issue VP config IOCB (%x).\n", rval);
4053 } else if (vpmod->comp_status != 0) {
4054 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4055 "Failed to complete IOCB -- error status (%x).\n",
4056 vpmod->comp_status);
4057 rval = QLA_FUNCTION_FAILED;
4058 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4059 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4060 "Failed to complete IOCB -- completion status (%x).\n",
4061 le16_to_cpu(vpmod->comp_status));
4062 rval = QLA_FUNCTION_FAILED;
4063 } else {
4064 /* EMPTY */
4065 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4066 "Done %s.\n", __func__);
4067 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4069 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4071 return rval;
/*
4075 * qla2x00_send_change_request
4076 * Receive or disable RSCN request from fabric controller
4078 * Input:
4079 * ha = adapter block pointer
4080 * format = registration format:
4081 * 0 - Reserved
4082 * 1 - Fabric detected registration
4083 * 2 - N_port detected registration
4084 * 3 - Full registration
4085 * FF - clear registration
4086 * vp_idx = Virtual port index
4088 * Returns:
4089 * qla2x00 local function return status code.
4091 * Context:
4092 * Kernel Context
 */
int
4096 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4097 uint16_t vp_idx)
{
4099 int rval;
4100 mbx_cmd_t mc;
4101 mbx_cmd_t *mcp = &mc;
4103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4104 "Entered %s.\n", __func__);
4106 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4107 mcp->mb[1] = format;
4108 mcp->mb[9] = vp_idx;
4109 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4110 mcp->in_mb = MBX_0|MBX_1;
4111 mcp->tov = MBX_TOV_SECONDS;
4112 mcp->flags = 0;
4113 rval = qla2x00_mailbox_command(vha, mcp);
4115 if (rval == QLA_SUCCESS) {
4116 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4117 rval = BIT_1;
4119 } else
4120 rval = BIT_1;
4122 return rval;
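/*
 * Usage sketch (illustrative): the driver registers for full RSCN
 * notification on the physical port with format 3, e.g.
 *
 *	rval = qla2x00_send_change_request(vha, 0x3, 0);
 *
 * Any failure is folded into BIT_1 above, so callers only need to test
 * the result against QLA_SUCCESS.
 */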
4126 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4127 uint32_t size)
4129 int rval;
4130 mbx_cmd_t mc;
4131 mbx_cmd_t *mcp = &mc;
4133 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4134 "Entered %s.\n", __func__);
4136 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4137 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4138 mcp->mb[8] = MSW(addr);
4139 mcp->out_mb = MBX_8|MBX_0;
4140 } else {
4141 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4142 mcp->out_mb = MBX_0;
4144 mcp->mb[1] = LSW(addr);
4145 mcp->mb[2] = MSW(req_dma);
4146 mcp->mb[3] = LSW(req_dma);
4147 mcp->mb[6] = MSW(MSD(req_dma));
4148 mcp->mb[7] = LSW(MSD(req_dma));
4149 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4150 if (IS_FWI2_CAPABLE(vha->hw)) {
4151 mcp->mb[4] = MSW(size);
4152 mcp->mb[5] = LSW(size);
4153 mcp->out_mb |= MBX_5|MBX_4;
4154 } else {
4155 mcp->mb[4] = LSW(size);
4156 mcp->out_mb |= MBX_4;
4159 mcp->in_mb = MBX_0;
4160 mcp->tov = MBX_TOV_SECONDS;
4161 mcp->flags = 0;
4162 rval = qla2x00_mailbox_command(vha, mcp);
4164 if (rval != QLA_SUCCESS) {
4165 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4166 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4167 } else {
4168 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4169 "Done %s.\n", __func__);
4172 return rval;
4174 /* 84XX Support **************************************************************/
4176 struct cs84xx_mgmt_cmd {
4177 union {
4178 struct verify_chip_entry_84xx req;
4179 struct verify_chip_rsp_84xx rsp;
4180 } p;
4184 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4186 int rval, retry;
4187 struct cs84xx_mgmt_cmd *mn;
4188 dma_addr_t mn_dma;
4189 uint16_t options;
4190 unsigned long flags;
4191 struct qla_hw_data *ha = vha->hw;
4193 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4194 "Entered %s.\n", __func__);
4196 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4197 if (mn == NULL) {
4198 return QLA_MEMORY_ALLOC_FAILED;
4201 /* Force Update? */
4202 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4203 /* Diagnostic firmware? */
4204 /* options |= MENLO_DIAG_FW; */
4205 /* We update the firmware with only one data sequence. */
4206 options |= VCO_END_OF_DATA;
4208 do {
4209 retry = 0;
4210 memset(mn, 0, sizeof(*mn));
4211 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4212 mn->p.req.entry_count = 1;
4213 mn->p.req.options = cpu_to_le16(options);
4215 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4216 "Dump of Verify Request.\n");
4217 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4218 (uint8_t *)mn, sizeof(*mn));
4220 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4221 if (rval != QLA_SUCCESS) {
4222 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4223 "Failed to issue verify IOCB (%x).\n", rval);
4224 goto verify_done;
4227 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4228 "Dump of Verify Response.\n");
4229 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4230 (uint8_t *)mn, sizeof(*mn));
4232 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4233 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4234 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4236 "cs=%x fc=%x.\n", status[0], status[1]);
4238 if (status[0] != CS_COMPLETE) {
4239 rval = QLA_FUNCTION_FAILED;
4240 if (!(options & VCO_DONT_UPDATE_FW)) {
4241 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4242 "Firmware update failed. Retrying "
4243 "without update firmware.\n");
4244 options |= VCO_DONT_UPDATE_FW;
4245 options &= ~VCO_FORCE_UPDATE;
4246 retry = 1;
4247 }
4248 } else {
4249 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4250 "Firmware updated to %x.\n",
4251 le32_to_cpu(mn->p.rsp.fw_ver));
4253 /* NOTE: we only update OP firmware. */
4254 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4255 ha->cs84xx->op_fw_version =
4256 le32_to_cpu(mn->p.rsp.fw_ver);
4257 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4258 flags);
4259 }
4260 } while (retry);
4262 verify_done:
4263 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4265 if (rval != QLA_SUCCESS) {
4266 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4267 "Failed=%x.\n", rval);
4268 } else {
4269 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4270 "Done %s.\n", __func__);
4273 return rval;
4274 }
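/*
 * The two status words filled in above carry the verify IOCB completion
 * status and, on CS_VCS_CHIP_FAILURE, the firmware failure code.  A hedged
 * caller sketch (the 0xffff debug code is a placeholder):
 *
 *	uint16_t status[2];
 *
 *	if (qla84xx_verify_chip(vha, status) != QLA_SUCCESS ||
 *	    status[0] != CS_COMPLETE)
 *		ql_log(ql_log_warn, vha, 0xffff,
 *		    "cs84xx verify failed cs=%x fc=%x.\n",
 *		    status[0], status[1]);
 */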
4277 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4279 int rval;
4280 unsigned long flags;
4281 mbx_cmd_t mc;
4282 mbx_cmd_t *mcp = &mc;
4283 struct qla_hw_data *ha = vha->hw;
4285 if (!ha->flags.fw_started)
4286 return QLA_SUCCESS;
4288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4289 "Entered %s.\n", __func__);
4291 if (IS_SHADOW_REG_CAPABLE(ha))
4292 req->options |= BIT_13;
4294 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4295 mcp->mb[1] = req->options;
4296 mcp->mb[2] = MSW(LSD(req->dma));
4297 mcp->mb[3] = LSW(LSD(req->dma));
4298 mcp->mb[6] = MSW(MSD(req->dma));
4299 mcp->mb[7] = LSW(MSD(req->dma));
4300 mcp->mb[5] = req->length;
4301 if (req->rsp)
4302 mcp->mb[10] = req->rsp->id;
4303 mcp->mb[12] = req->qos;
4304 mcp->mb[11] = req->vp_idx;
4305 mcp->mb[13] = req->rid;
4306 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4307 mcp->mb[15] = 0;
4309 mcp->mb[4] = req->id;
4310 /* que in ptr index */
4311 mcp->mb[8] = 0;
4312 /* que out ptr index */
4313 mcp->mb[9] = *req->out_ptr = 0;
4314 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4315 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4316 mcp->in_mb = MBX_0;
4317 mcp->flags = MBX_DMA_OUT;
4318 mcp->tov = MBX_TOV_SECONDS * 2;
4320 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4321 mcp->in_mb |= MBX_1;
4322 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4323 mcp->out_mb |= MBX_15;
4324 /* debug q create issue in SR-IOV */
4325 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4328 spin_lock_irqsave(&ha->hardware_lock, flags);
4329 if (!(req->options & BIT_0)) {
4330 WRT_REG_DWORD(req->req_q_in, 0);
4331 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4332 WRT_REG_DWORD(req->req_q_out, 0);
4334 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4336 rval = qla2x00_mailbox_command(vha, mcp);
4337 if (rval != QLA_SUCCESS) {
4338 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4339 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4340 } else {
4341 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4342 "Done %s.\n", __func__);
4345 return rval;
4349 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4351 int rval;
4352 unsigned long flags;
4353 mbx_cmd_t mc;
4354 mbx_cmd_t *mcp = &mc;
4355 struct qla_hw_data *ha = vha->hw;
4357 if (!ha->flags.fw_started)
4358 return QLA_SUCCESS;
4360 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4361 "Entered %s.\n", __func__);
4363 if (IS_SHADOW_REG_CAPABLE(ha))
4364 rsp->options |= BIT_13;
4366 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4367 mcp->mb[1] = rsp->options;
4368 mcp->mb[2] = MSW(LSD(rsp->dma));
4369 mcp->mb[3] = LSW(LSD(rsp->dma));
4370 mcp->mb[6] = MSW(MSD(rsp->dma));
4371 mcp->mb[7] = LSW(MSD(rsp->dma));
4372 mcp->mb[5] = rsp->length;
4373 mcp->mb[14] = rsp->msix->entry;
4374 mcp->mb[13] = rsp->rid;
4375 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4376 mcp->mb[15] = 0;
4378 mcp->mb[4] = rsp->id;
4379 /* que in ptr index */
4380 mcp->mb[8] = *rsp->in_ptr = 0;
4381 /* que out ptr index */
4382 mcp->mb[9] = 0;
4383 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4384 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4385 mcp->in_mb = MBX_0;
4386 mcp->flags = MBX_DMA_OUT;
4387 mcp->tov = MBX_TOV_SECONDS * 2;
4389 if (IS_QLA81XX(ha)) {
4390 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4391 mcp->in_mb |= MBX_1;
4392 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4393 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4394 mcp->in_mb |= MBX_1;
4395 /* debug q create issue in SR-IOV */
4396 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4399 spin_lock_irqsave(&ha->hardware_lock, flags);
4400 if (!(rsp->options & BIT_0)) {
4401 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4402 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4403 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4406 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4408 rval = qla2x00_mailbox_command(vha, mcp);
4409 if (rval != QLA_SUCCESS) {
4410 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4411 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4412 } else {
4413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4414 "Done %s.\n", __func__);
4417 return rval;
4418 }
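/*
 * Both qla25xx_init_req_que() and qla25xx_init_rsp_que() drive the same
 * MBC_INITIALIZE_MULTIQ command: mb[1] carries the queue options and, when
 * BIT_0 is clear, the driver also zeroes the queue in/out pointer registers
 * under the hardware lock before issuing the mailbox command, as shown
 * above.
 */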
4421 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4423 int rval;
4424 mbx_cmd_t mc;
4425 mbx_cmd_t *mcp = &mc;
4427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4428 "Entered %s.\n", __func__);
4430 mcp->mb[0] = MBC_IDC_ACK;
4431 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4432 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4433 mcp->in_mb = MBX_0;
4434 mcp->tov = MBX_TOV_SECONDS;
4435 mcp->flags = 0;
4436 rval = qla2x00_mailbox_command(vha, mcp);
4438 if (rval != QLA_SUCCESS) {
4439 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4440 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4441 } else {
4442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4443 "Done %s.\n", __func__);
4446 return rval;
4450 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4452 int rval;
4453 mbx_cmd_t mc;
4454 mbx_cmd_t *mcp = &mc;
4456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4457 "Entered %s.\n", __func__);
4459 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4460 !IS_QLA27XX(vha->hw))
4461 return QLA_FUNCTION_FAILED;
4463 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4464 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4465 mcp->out_mb = MBX_1|MBX_0;
4466 mcp->in_mb = MBX_1|MBX_0;
4467 mcp->tov = MBX_TOV_SECONDS;
4468 mcp->flags = 0;
4469 rval = qla2x00_mailbox_command(vha, mcp);
4471 if (rval != QLA_SUCCESS) {
4472 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4473 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4474 rval, mcp->mb[0], mcp->mb[1]);
4475 } else {
4476 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4477 "Done %s.\n", __func__);
4478 *sector_size = mcp->mb[1];
4481 return rval;
4485 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4487 int rval;
4488 mbx_cmd_t mc;
4489 mbx_cmd_t *mcp = &mc;
4491 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4492 !IS_QLA27XX(vha->hw))
4493 return QLA_FUNCTION_FAILED;
4495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4496 "Entered %s.\n", __func__);
4498 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4499 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4500 FAC_OPT_CMD_WRITE_PROTECT;
4501 mcp->out_mb = MBX_1|MBX_0;
4502 mcp->in_mb = MBX_1|MBX_0;
4503 mcp->tov = MBX_TOV_SECONDS;
4504 mcp->flags = 0;
4505 rval = qla2x00_mailbox_command(vha, mcp);
4507 if (rval != QLA_SUCCESS) {
4508 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4509 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4510 rval, mcp->mb[0], mcp->mb[1]);
4511 } else {
4512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4513 "Done %s.\n", __func__);
4516 return rval;
4520 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4522 int rval;
4523 mbx_cmd_t mc;
4524 mbx_cmd_t *mcp = &mc;
4526 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4527 !IS_QLA27XX(vha->hw))
4528 return QLA_FUNCTION_FAILED;
4530 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4531 "Entered %s.\n", __func__);
4533 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4534 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4535 mcp->mb[2] = LSW(start);
4536 mcp->mb[3] = MSW(start);
4537 mcp->mb[4] = LSW(finish);
4538 mcp->mb[5] = MSW(finish);
4539 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4540 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4541 mcp->tov = MBX_TOV_SECONDS;
4542 mcp->flags = 0;
4543 rval = qla2x00_mailbox_command(vha, mcp);
4545 if (rval != QLA_SUCCESS) {
4546 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4547 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4548 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4549 } else {
4550 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4551 "Done %s.\n", __func__);
4554 return rval;
4555 }
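/*
 * The three FAC helpers above are normally used as a sequence when a flash
 * region is rewritten.  A hedged sketch, with the start address and erase
 * span illustrative and error handling elided:
 *
 *	uint32_t ssize;
 *
 *	if (qla81xx_fac_get_sector_size(vha, &ssize) == QLA_SUCCESS &&
 *	    qla81xx_fac_do_write_enable(vha, 1) == QLA_SUCCESS) {
 *		qla81xx_fac_erase_sector(vha, start, start + ssize - 1);
 *		(program the erased sector here)
 *		qla81xx_fac_do_write_enable(vha, 0);
 *	}
 */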
4558 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4560 int rval = 0;
4561 mbx_cmd_t mc;
4562 mbx_cmd_t *mcp = &mc;
4564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4565 "Entered %s.\n", __func__);
4567 mcp->mb[0] = MBC_RESTART_MPI_FW;
4568 mcp->out_mb = MBX_0;
4569 mcp->in_mb = MBX_0|MBX_1;
4570 mcp->tov = MBX_TOV_SECONDS;
4571 mcp->flags = 0;
4572 rval = qla2x00_mailbox_command(vha, mcp);
4574 if (rval != QLA_SUCCESS) {
4575 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4576 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4577 rval, mcp->mb[0], mcp->mb[1]);
4578 } else {
4579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4580 "Done %s.\n", __func__);
4583 return rval;
4587 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4589 int rval;
4590 mbx_cmd_t mc;
4591 mbx_cmd_t *mcp = &mc;
4592 int i;
4593 int len;
4594 uint16_t *str;
4595 struct qla_hw_data *ha = vha->hw;
4597 if (!IS_P3P_TYPE(ha))
4598 return QLA_FUNCTION_FAILED;
4600 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4601 "Entered %s.\n", __func__);
4603 str = (void *)version;
4604 len = strlen(version);
4606 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4607 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4608 mcp->out_mb = MBX_1|MBX_0;
4609 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4610 mcp->mb[i] = cpu_to_le16p(str);
4611 mcp->out_mb |= 1<<i;
4612 }
4613 for (; i < 16; i++) {
4614 mcp->mb[i] = 0;
4615 mcp->out_mb |= 1<<i;
4616 }
4617 mcp->in_mb = MBX_1|MBX_0;
4618 mcp->tov = MBX_TOV_SECONDS;
4619 mcp->flags = 0;
4620 rval = qla2x00_mailbox_command(vha, mcp);
4622 if (rval != QLA_SUCCESS) {
4623 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4624 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4625 } else {
4626 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4627 "Done %s.\n", __func__);
4630 return rval;
4634 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4636 int rval;
4637 mbx_cmd_t mc;
4638 mbx_cmd_t *mcp = &mc;
4639 int len;
4640 uint16_t dwlen;
4641 uint8_t *str;
4642 dma_addr_t str_dma;
4643 struct qla_hw_data *ha = vha->hw;
4645 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4646 IS_P3P_TYPE(ha))
4647 return QLA_FUNCTION_FAILED;
4649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4650 "Entered %s.\n", __func__);
4652 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4653 if (!str) {
4654 ql_log(ql_log_warn, vha, 0x117f,
4655 "Failed to allocate driver version param.\n");
4656 return QLA_MEMORY_ALLOC_FAILED;
4659 memcpy(str, "\x7\x3\x11\x0", 4);
4660 dwlen = str[0];
4661 len = dwlen * 4 - 4;
4662 memset(str + 4, 0, len);
4663 if (len > strlen(version))
4664 len = strlen(version);
4665 memcpy(str + 4, version, len);
4667 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4668 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4669 mcp->mb[2] = MSW(LSD(str_dma));
4670 mcp->mb[3] = LSW(LSD(str_dma));
4671 mcp->mb[6] = MSW(MSD(str_dma));
4672 mcp->mb[7] = LSW(MSD(str_dma));
4673 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4674 mcp->in_mb = MBX_1|MBX_0;
4675 mcp->tov = MBX_TOV_SECONDS;
4676 mcp->flags = 0;
4677 rval = qla2x00_mailbox_command(vha, mcp);
4679 if (rval != QLA_SUCCESS) {
4680 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4681 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4682 } else {
4683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4684 "Done %s.\n", __func__);
4687 dma_pool_free(ha->s_dma_pool, str, str_dma);
4689 return rval;
4690 }
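/*
 * The 4-byte header copied above ("\x7\x3\x11\x0") makes str[0] the buffer
 * length in dwords, so dwlen = 7 gives a 28-byte buffer of which
 * 7 * 4 - 4 = 24 bytes are available for the driver version string; longer
 * version strings are truncated to fit.
 */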
4693 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4694 void *buf, uint16_t bufsiz)
4696 int rval, i;
4697 mbx_cmd_t mc;
4698 mbx_cmd_t *mcp = &mc;
4699 uint32_t *bp;
4701 if (!IS_FWI2_CAPABLE(vha->hw))
4702 return QLA_FUNCTION_FAILED;
4704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4705 "Entered %s.\n", __func__);
4707 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4708 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4709 mcp->mb[2] = MSW(buf_dma);
4710 mcp->mb[3] = LSW(buf_dma);
4711 mcp->mb[6] = MSW(MSD(buf_dma));
4712 mcp->mb[7] = LSW(MSD(buf_dma));
4713 mcp->mb[8] = bufsiz/4;
4714 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4715 mcp->in_mb = MBX_1|MBX_0;
4716 mcp->tov = MBX_TOV_SECONDS;
4717 mcp->flags = 0;
4718 rval = qla2x00_mailbox_command(vha, mcp);
4720 if (rval != QLA_SUCCESS) {
4721 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4722 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4723 } else {
4724 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4725 "Done %s.\n", __func__);
4726 bp = (uint32_t *) buf;
4727 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4728 *bp = le32_to_cpu(*bp);
4731 return rval;
4734 static int
4735 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4737 int rval;
4738 mbx_cmd_t mc;
4739 mbx_cmd_t *mcp = &mc;
4741 if (!IS_FWI2_CAPABLE(vha->hw))
4742 return QLA_FUNCTION_FAILED;
4744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4745 "Entered %s.\n", __func__);
4747 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4748 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4749 mcp->out_mb = MBX_1|MBX_0;
4750 mcp->in_mb = MBX_1|MBX_0;
4751 mcp->tov = MBX_TOV_SECONDS;
4752 mcp->flags = 0;
4753 rval = qla2x00_mailbox_command(vha, mcp);
4754 *temp = mcp->mb[1];
4756 if (rval != QLA_SUCCESS) {
4757 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4758 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4759 } else {
4760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4761 "Done %s.\n", __func__);
4764 return rval;
4768 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4769 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4771 int rval;
4772 mbx_cmd_t mc;
4773 mbx_cmd_t *mcp = &mc;
4774 struct qla_hw_data *ha = vha->hw;
4776 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4777 "Entered %s.\n", __func__);
4779 if (!IS_FWI2_CAPABLE(ha))
4780 return QLA_FUNCTION_FAILED;
4782 if (len == 1)
4783 opt |= BIT_0;
4785 mcp->mb[0] = MBC_READ_SFP;
4786 mcp->mb[1] = dev;
4787 mcp->mb[2] = MSW(sfp_dma);
4788 mcp->mb[3] = LSW(sfp_dma);
4789 mcp->mb[6] = MSW(MSD(sfp_dma));
4790 mcp->mb[7] = LSW(MSD(sfp_dma));
4791 mcp->mb[8] = len;
4792 mcp->mb[9] = off;
4793 mcp->mb[10] = opt;
4794 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4795 mcp->in_mb = MBX_1|MBX_0;
4796 mcp->tov = MBX_TOV_SECONDS;
4797 mcp->flags = 0;
4798 rval = qla2x00_mailbox_command(vha, mcp);
4800 if (opt & BIT_0)
4801 *sfp = mcp->mb[1];
4803 if (rval != QLA_SUCCESS) {
4804 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4805 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4806 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4807 mcp->mb[1] == 0x22)
4808 /* sfp is not there */
4809 rval = QLA_INTERFACE_ERROR;
4810 } else {
4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4812 "Done %s.\n", __func__);
4815 return rval;
4816 }
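/*
 * For single-byte reads (len == 1) the routine sets BIT_0 in the option
 * word itself and returns the byte in mb[1] rather than through the DMA
 * buffer; qla2x00_get_thermal_temp() and qla2x00_read_sfp_dev() later in
 * this file show both calling styles.
 */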
4819 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4820 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4822 int rval;
4823 mbx_cmd_t mc;
4824 mbx_cmd_t *mcp = &mc;
4825 struct qla_hw_data *ha = vha->hw;
4827 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4828 "Entered %s.\n", __func__);
4830 if (!IS_FWI2_CAPABLE(ha))
4831 return QLA_FUNCTION_FAILED;
4833 if (len == 1)
4834 opt |= BIT_0;
4836 if (opt & BIT_0)
4837 len = *sfp;
4839 mcp->mb[0] = MBC_WRITE_SFP;
4840 mcp->mb[1] = dev;
4841 mcp->mb[2] = MSW(sfp_dma);
4842 mcp->mb[3] = LSW(sfp_dma);
4843 mcp->mb[6] = MSW(MSD(sfp_dma));
4844 mcp->mb[7] = LSW(MSD(sfp_dma));
4845 mcp->mb[8] = len;
4846 mcp->mb[9] = off;
4847 mcp->mb[10] = opt;
4848 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4849 mcp->in_mb = MBX_1|MBX_0;
4850 mcp->tov = MBX_TOV_SECONDS;
4851 mcp->flags = 0;
4852 rval = qla2x00_mailbox_command(vha, mcp);
4854 if (rval != QLA_SUCCESS) {
4855 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4856 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4857 } else {
4858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4859 "Done %s.\n", __func__);
4862 return rval;
4866 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4867 uint16_t size_in_bytes, uint16_t *actual_size)
4869 int rval;
4870 mbx_cmd_t mc;
4871 mbx_cmd_t *mcp = &mc;
4873 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4874 "Entered %s.\n", __func__);
4876 if (!IS_CNA_CAPABLE(vha->hw))
4877 return QLA_FUNCTION_FAILED;
4879 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4880 mcp->mb[2] = MSW(stats_dma);
4881 mcp->mb[3] = LSW(stats_dma);
4882 mcp->mb[6] = MSW(MSD(stats_dma));
4883 mcp->mb[7] = LSW(MSD(stats_dma));
4884 mcp->mb[8] = size_in_bytes >> 2;
4885 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4886 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4887 mcp->tov = MBX_TOV_SECONDS;
4888 mcp->flags = 0;
4889 rval = qla2x00_mailbox_command(vha, mcp);
4891 if (rval != QLA_SUCCESS) {
4892 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4893 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4894 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4895 } else {
4896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4897 "Done %s.\n", __func__);
4900 *actual_size = mcp->mb[2] << 2;
4903 return rval;
4907 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4908 uint16_t size)
4910 int rval;
4911 mbx_cmd_t mc;
4912 mbx_cmd_t *mcp = &mc;
4914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4915 "Entered %s.\n", __func__);
4917 if (!IS_CNA_CAPABLE(vha->hw))
4918 return QLA_FUNCTION_FAILED;
4920 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4921 mcp->mb[1] = 0;
4922 mcp->mb[2] = MSW(tlv_dma);
4923 mcp->mb[3] = LSW(tlv_dma);
4924 mcp->mb[6] = MSW(MSD(tlv_dma));
4925 mcp->mb[7] = LSW(MSD(tlv_dma));
4926 mcp->mb[8] = size;
4927 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4928 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4929 mcp->tov = MBX_TOV_SECONDS;
4930 mcp->flags = 0;
4931 rval = qla2x00_mailbox_command(vha, mcp);
4933 if (rval != QLA_SUCCESS) {
4934 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4935 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4936 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4937 } else {
4938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4939 "Done %s.\n", __func__);
4942 return rval;
4946 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4948 int rval;
4949 mbx_cmd_t mc;
4950 mbx_cmd_t *mcp = &mc;
4952 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4953 "Entered %s.\n", __func__);
4955 if (!IS_FWI2_CAPABLE(vha->hw))
4956 return QLA_FUNCTION_FAILED;
4958 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4959 mcp->mb[1] = LSW(risc_addr);
4960 mcp->mb[8] = MSW(risc_addr);
4961 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4962 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4963 mcp->tov = 30;
4964 mcp->flags = 0;
4965 rval = qla2x00_mailbox_command(vha, mcp);
4966 if (rval != QLA_SUCCESS) {
4967 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4968 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4969 } else {
4970 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4971 "Done %s.\n", __func__);
4972 *data = mcp->mb[3] << 16 | mcp->mb[2];
4975 return rval;
4979 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4980 uint16_t *mresp)
4982 int rval;
4983 mbx_cmd_t mc;
4984 mbx_cmd_t *mcp = &mc;
4986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4987 "Entered %s.\n", __func__);
4989 memset(mcp->mb, 0 , sizeof(mcp->mb));
4990 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4991 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
4993 /* transfer count */
4994 mcp->mb[10] = LSW(mreq->transfer_size);
4995 mcp->mb[11] = MSW(mreq->transfer_size);
4997 /* send data address */
4998 mcp->mb[14] = LSW(mreq->send_dma);
4999 mcp->mb[15] = MSW(mreq->send_dma);
5000 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5001 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5003 /* receive data address */
5004 mcp->mb[16] = LSW(mreq->rcv_dma);
5005 mcp->mb[17] = MSW(mreq->rcv_dma);
5006 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5007 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5009 /* Iteration count */
5010 mcp->mb[18] = LSW(mreq->iteration_count);
5011 mcp->mb[19] = MSW(mreq->iteration_count);
5013 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5014 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5015 if (IS_CNA_CAPABLE(vha->hw))
5016 mcp->out_mb |= MBX_2;
5017 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5019 mcp->buf_size = mreq->transfer_size;
5020 mcp->tov = MBX_TOV_SECONDS;
5021 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5023 rval = qla2x00_mailbox_command(vha, mcp);
5025 if (rval != QLA_SUCCESS) {
5026 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5027 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5028 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5029 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5030 } else {
5031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5032 "Done %s.\n", __func__);
5035 /* Copy mailbox information */
5036 memcpy(mresp, mcp->mb, 64);
5037 return rval;
5038 }
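/*
 * The full mailbox array (32 registers, 64 bytes) is copied to mresp above
 * so the diagnostic caller can report the extra completion words returned
 * through in_mb (for example mb[18]/mb[19]) alongside the return value.
 */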
5041 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5042 uint16_t *mresp)
5044 int rval;
5045 mbx_cmd_t mc;
5046 mbx_cmd_t *mcp = &mc;
5047 struct qla_hw_data *ha = vha->hw;
5049 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5050 "Entered %s.\n", __func__);
5052 memset(mcp->mb, 0 , sizeof(mcp->mb));
5053 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5054 /* BIT_6 specifies 64bit address */
5055 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5056 if (IS_CNA_CAPABLE(ha)) {
5057 mcp->mb[2] = vha->fcoe_fcf_idx;
5059 mcp->mb[16] = LSW(mreq->rcv_dma);
5060 mcp->mb[17] = MSW(mreq->rcv_dma);
5061 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5062 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5064 mcp->mb[10] = LSW(mreq->transfer_size);
5066 mcp->mb[14] = LSW(mreq->send_dma);
5067 mcp->mb[15] = MSW(mreq->send_dma);
5068 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5069 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5071 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5072 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5073 if (IS_CNA_CAPABLE(ha))
5074 mcp->out_mb |= MBX_2;
5076 mcp->in_mb = MBX_0;
5077 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5078 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5079 mcp->in_mb |= MBX_1;
5080 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5081 mcp->in_mb |= MBX_3;
5083 mcp->tov = MBX_TOV_SECONDS;
5084 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5085 mcp->buf_size = mreq->transfer_size;
5087 rval = qla2x00_mailbox_command(vha, mcp);
5089 if (rval != QLA_SUCCESS) {
5090 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5091 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5092 rval, mcp->mb[0], mcp->mb[1]);
5093 } else {
5094 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5095 "Done %s.\n", __func__);
5098 /* Copy mailbox information */
5099 memcpy(mresp, mcp->mb, 64);
5100 return rval;
5104 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5106 int rval;
5107 mbx_cmd_t mc;
5108 mbx_cmd_t *mcp = &mc;
5110 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5111 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5113 mcp->mb[0] = MBC_ISP84XX_RESET;
5114 mcp->mb[1] = enable_diagnostic;
5115 mcp->out_mb = MBX_1|MBX_0;
5116 mcp->in_mb = MBX_1|MBX_0;
5117 mcp->tov = MBX_TOV_SECONDS;
5118 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5119 rval = qla2x00_mailbox_command(vha, mcp);
5121 if (rval != QLA_SUCCESS)
5122 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5123 else
5124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5125 "Done %s.\n", __func__);
5127 return rval;
5131 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5133 int rval;
5134 mbx_cmd_t mc;
5135 mbx_cmd_t *mcp = &mc;
5137 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5138 "Entered %s.\n", __func__);
5140 if (!IS_FWI2_CAPABLE(vha->hw))
5141 return QLA_FUNCTION_FAILED;
5143 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5144 mcp->mb[1] = LSW(risc_addr);
5145 mcp->mb[2] = LSW(data);
5146 mcp->mb[3] = MSW(data);
5147 mcp->mb[8] = MSW(risc_addr);
5148 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5149 mcp->in_mb = MBX_0;
5150 mcp->tov = 30;
5151 mcp->flags = 0;
5152 rval = qla2x00_mailbox_command(vha, mcp);
5153 if (rval != QLA_SUCCESS) {
5154 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5155 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5156 } else {
5157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5158 "Done %s.\n", __func__);
5161 return rval;
5165 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5167 int rval;
5168 uint32_t stat, timer;
5169 uint16_t mb0 = 0;
5170 struct qla_hw_data *ha = vha->hw;
5171 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5173 rval = QLA_SUCCESS;
5175 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5176 "Entered %s.\n", __func__);
5178 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5180 /* Write the MBC data to the registers */
5181 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5182 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5183 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5184 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5185 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5187 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5189 /* Poll for MBC interrupt */
5190 for (timer = 6000000; timer; timer--) {
5191 /* Check for pending interrupts. */
5192 stat = RD_REG_DWORD(&reg->host_status);
5193 if (stat & HSRX_RISC_INT) {
5194 stat &= 0xff;
5196 if (stat == 0x1 || stat == 0x2 ||
5197 stat == 0x10 || stat == 0x11) {
5198 set_bit(MBX_INTERRUPT,
5199 &ha->mbx_cmd_flags);
5200 mb0 = RD_REG_WORD(&reg->mailbox0);
5201 WRT_REG_DWORD(&reg->hccr,
5202 HCCRX_CLR_RISC_INT);
5203 RD_REG_DWORD(&reg->hccr);
5204 break;
5205 }
5206 }
5207 udelay(5);
5208 }
5210 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5211 rval = mb0 & MBS_MASK;
5212 else
5213 rval = QLA_FUNCTION_FAILED;
5215 if (rval != QLA_SUCCESS) {
5216 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5217 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5218 } else {
5219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5220 "Done %s.\n", __func__);
5223 return rval;
5224 }
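/*
 * Unlike the other helpers here, qla81xx_write_mpi_register() bypasses
 * qla2x00_mailbox_command() and drives the mailbox registers directly,
 * polling host_status for completion: 6,000,000 iterations with a 5 us
 * delay gives roughly a 30 second ceiling before the command is treated
 * as failed.
 */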
5227 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5229 int rval;
5230 mbx_cmd_t mc;
5231 mbx_cmd_t *mcp = &mc;
5232 struct qla_hw_data *ha = vha->hw;
5234 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5235 "Entered %s.\n", __func__);
5237 if (!IS_FWI2_CAPABLE(ha))
5238 return QLA_FUNCTION_FAILED;
5240 mcp->mb[0] = MBC_DATA_RATE;
5241 mcp->mb[1] = 0;
5242 mcp->out_mb = MBX_1|MBX_0;
5243 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5244 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5245 mcp->in_mb |= MBX_3;
5246 mcp->tov = MBX_TOV_SECONDS;
5247 mcp->flags = 0;
5248 rval = qla2x00_mailbox_command(vha, mcp);
5249 if (rval != QLA_SUCCESS) {
5250 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5251 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5252 } else {
5253 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5254 "Done %s.\n", __func__);
5255 if (mcp->mb[1] != 0x7)
5256 ha->link_data_rate = mcp->mb[1];
5259 return rval;
5263 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5265 int rval;
5266 mbx_cmd_t mc;
5267 mbx_cmd_t *mcp = &mc;
5268 struct qla_hw_data *ha = vha->hw;
5270 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5271 "Entered %s.\n", __func__);
5273 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5274 !IS_QLA27XX(ha))
5275 return QLA_FUNCTION_FAILED;
5276 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5277 mcp->out_mb = MBX_0;
5278 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5279 mcp->tov = MBX_TOV_SECONDS;
5280 mcp->flags = 0;
5282 rval = qla2x00_mailbox_command(vha, mcp);
5284 if (rval != QLA_SUCCESS) {
5285 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5286 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5287 } else {
5288 /* Copy all bits to preserve original value */
5289 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5292 "Done %s.\n", __func__);
5294 return rval;
5298 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5300 int rval;
5301 mbx_cmd_t mc;
5302 mbx_cmd_t *mcp = &mc;
5304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5305 "Entered %s.\n", __func__);
5307 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5308 /* Copy all bits to preserve original setting */
5309 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5310 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5311 mcp->in_mb = MBX_0;
5312 mcp->tov = MBX_TOV_SECONDS;
5313 mcp->flags = 0;
5314 rval = qla2x00_mailbox_command(vha, mcp);
5316 if (rval != QLA_SUCCESS) {
5317 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5318 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5319 } else
5320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5321 "Done %s.\n", __func__);
5323 return rval;
5328 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5329 uint16_t *mb)
5331 int rval;
5332 mbx_cmd_t mc;
5333 mbx_cmd_t *mcp = &mc;
5334 struct qla_hw_data *ha = vha->hw;
5336 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5337 "Entered %s.\n", __func__);
5339 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5340 return QLA_FUNCTION_FAILED;
5342 mcp->mb[0] = MBC_PORT_PARAMS;
5343 mcp->mb[1] = loop_id;
5344 if (ha->flags.fcp_prio_enabled)
5345 mcp->mb[2] = BIT_1;
5346 else
5347 mcp->mb[2] = BIT_2;
5348 mcp->mb[4] = priority & 0xf;
5349 mcp->mb[9] = vha->vp_idx;
5350 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5351 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5352 mcp->tov = 30;
5353 mcp->flags = 0;
5354 rval = qla2x00_mailbox_command(vha, mcp);
5355 if (mb != NULL) {
5356 mb[0] = mcp->mb[0];
5357 mb[1] = mcp->mb[1];
5358 mb[3] = mcp->mb[3];
5359 mb[4] = mcp->mb[4];
5362 if (rval != QLA_SUCCESS) {
5363 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5364 } else {
5365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5366 "Done %s.\n", __func__);
5369 return rval;
5373 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5375 int rval = QLA_FUNCTION_FAILED;
5376 struct qla_hw_data *ha = vha->hw;
5377 uint8_t byte;
5379 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5380 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5381 "Thermal not supported by this card.\n");
5382 return rval;
5385 if (IS_QLA25XX(ha)) {
5386 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5387 ha->pdev->subsystem_device == 0x0175) {
5388 rval = qla2x00_read_sfp(vha, 0, &byte,
5389 0x98, 0x1, 1, BIT_13|BIT_0);
5390 *temp = byte;
5391 return rval;
5393 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5394 ha->pdev->subsystem_device == 0x338e) {
5395 rval = qla2x00_read_sfp(vha, 0, &byte,
5396 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5397 *temp = byte;
5398 return rval;
5400 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5401 "Thermal not supported by this card.\n");
5402 return rval;
5405 if (IS_QLA82XX(ha)) {
5406 *temp = qla82xx_read_temperature(vha);
5407 rval = QLA_SUCCESS;
5408 return rval;
5409 } else if (IS_QLA8044(ha)) {
5410 *temp = qla8044_read_temperature(vha);
5411 rval = QLA_SUCCESS;
5412 return rval;
5415 rval = qla2x00_read_asic_temperature(vha, temp);
5416 return rval;
5420 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5422 int rval;
5423 struct qla_hw_data *ha = vha->hw;
5424 mbx_cmd_t mc;
5425 mbx_cmd_t *mcp = &mc;
5427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5428 "Entered %s.\n", __func__);
5430 if (!IS_FWI2_CAPABLE(ha))
5431 return QLA_FUNCTION_FAILED;
5433 memset(mcp, 0, sizeof(mbx_cmd_t));
5434 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5435 mcp->mb[1] = 1;
5437 mcp->out_mb = MBX_1|MBX_0;
5438 mcp->in_mb = MBX_0;
5439 mcp->tov = 30;
5440 mcp->flags = 0;
5442 rval = qla2x00_mailbox_command(vha, mcp);
5443 if (rval != QLA_SUCCESS) {
5444 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5445 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5446 } else {
5447 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5448 "Done %s.\n", __func__);
5451 return rval;
5455 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5457 int rval;
5458 struct qla_hw_data *ha = vha->hw;
5459 mbx_cmd_t mc;
5460 mbx_cmd_t *mcp = &mc;
5462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5463 "Entered %s.\n", __func__);
5465 if (!IS_P3P_TYPE(ha))
5466 return QLA_FUNCTION_FAILED;
5468 memset(mcp, 0, sizeof(mbx_cmd_t));
5469 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5470 mcp->mb[1] = 0;
5472 mcp->out_mb = MBX_1|MBX_0;
5473 mcp->in_mb = MBX_0;
5474 mcp->tov = 30;
5475 mcp->flags = 0;
5477 rval = qla2x00_mailbox_command(vha, mcp);
5478 if (rval != QLA_SUCCESS) {
5479 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5481 } else {
5482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5483 "Done %s.\n", __func__);
5486 return rval;
5490 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5492 struct qla_hw_data *ha = vha->hw;
5493 mbx_cmd_t mc;
5494 mbx_cmd_t *mcp = &mc;
5495 int rval = QLA_FUNCTION_FAILED;
5497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5498 "Entered %s.\n", __func__);
5500 memset(mcp->mb, 0 , sizeof(mcp->mb));
5501 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5502 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5503 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5504 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5506 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5507 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5508 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5510 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5511 mcp->tov = MBX_TOV_SECONDS;
5512 rval = qla2x00_mailbox_command(vha, mcp);
5514 /* Always copy back return mailbox values. */
5515 if (rval != QLA_SUCCESS) {
5516 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5517 "mailbox command FAILED=0x%x, subcode=%x.\n",
5518 (mcp->mb[1] << 16) | mcp->mb[0],
5519 (mcp->mb[3] << 16) | mcp->mb[2]);
5520 } else {
5521 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5522 "Done %s.\n", __func__);
5523 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5524 if (!ha->md_template_size) {
5525 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5526 "Null template size obtained.\n");
5527 rval = QLA_FUNCTION_FAILED;
5528 }
5529 }
5530 return rval;
5534 qla82xx_md_get_template(scsi_qla_host_t *vha)
5536 struct qla_hw_data *ha = vha->hw;
5537 mbx_cmd_t mc;
5538 mbx_cmd_t *mcp = &mc;
5539 int rval = QLA_FUNCTION_FAILED;
5541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5542 "Entered %s.\n", __func__);
5544 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5545 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5546 if (!ha->md_tmplt_hdr) {
5547 ql_log(ql_log_warn, vha, 0x1124,
5548 "Unable to allocate memory for Minidump template.\n");
5549 return rval;
5552 memset(mcp->mb, 0 , sizeof(mcp->mb));
5553 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5554 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5555 mcp->mb[2] = LSW(RQST_TMPLT);
5556 mcp->mb[3] = MSW(RQST_TMPLT);
5557 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5558 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5559 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5560 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5561 mcp->mb[8] = LSW(ha->md_template_size);
5562 mcp->mb[9] = MSW(ha->md_template_size);
5564 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5565 mcp->tov = MBX_TOV_SECONDS;
5566 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5567 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5568 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5569 rval = qla2x00_mailbox_command(vha, mcp);
5571 if (rval != QLA_SUCCESS) {
5572 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5573 "mailbox command FAILED=0x%x, subcode=%x.\n",
5574 ((mcp->mb[1] << 16) | mcp->mb[0]),
5575 ((mcp->mb[3] << 16) | mcp->mb[2]));
5576 } else
5577 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5578 "Done %s.\n", __func__);
5579 return rval;
5583 qla8044_md_get_template(scsi_qla_host_t *vha)
5585 struct qla_hw_data *ha = vha->hw;
5586 mbx_cmd_t mc;
5587 mbx_cmd_t *mcp = &mc;
5588 int rval = QLA_FUNCTION_FAILED;
5589 int offset = 0, size = MINIDUMP_SIZE_36K;
5590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5591 "Entered %s.\n", __func__);
5593 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5594 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5595 if (!ha->md_tmplt_hdr) {
5596 ql_log(ql_log_warn, vha, 0xb11b,
5597 "Unable to allocate memory for Minidump template.\n");
5598 return rval;
5601 memset(mcp->mb, 0 , sizeof(mcp->mb));
5602 while (offset < ha->md_template_size) {
5603 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5604 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5605 mcp->mb[2] = LSW(RQST_TMPLT);
5606 mcp->mb[3] = MSW(RQST_TMPLT);
5607 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5608 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5609 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5610 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5611 mcp->mb[8] = LSW(size);
5612 mcp->mb[9] = MSW(size);
5613 mcp->mb[10] = offset & 0x0000FFFF;
5614 mcp->mb[11] = offset & 0xFFFF0000;
5615 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5616 mcp->tov = MBX_TOV_SECONDS;
5617 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5618 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5619 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5620 rval = qla2x00_mailbox_command(vha, mcp);
5622 if (rval != QLA_SUCCESS) {
5623 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5624 "mailbox command FAILED=0x%x, subcode=%x.\n",
5625 ((mcp->mb[1] << 16) | mcp->mb[0]),
5626 ((mcp->mb[3] << 16) | mcp->mb[2]));
5627 return rval;
5628 } else
5629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5630 "Done %s.\n", __func__);
5631 offset = offset + size;
5632 }
5633 return rval;
5634 }
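/*
 * The ISP8044 variant above pulls the minidump template in
 * MINIDUMP_SIZE_36K chunks, passing the running offset to the firmware in
 * mb[10]/mb[11] and advancing until ha->md_template_size bytes have been
 * copied into the coherent buffer.
 */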
5637 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5639 int rval;
5640 struct qla_hw_data *ha = vha->hw;
5641 mbx_cmd_t mc;
5642 mbx_cmd_t *mcp = &mc;
5644 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5645 return QLA_FUNCTION_FAILED;
5647 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5648 "Entered %s.\n", __func__);
5650 memset(mcp, 0, sizeof(mbx_cmd_t));
5651 mcp->mb[0] = MBC_SET_LED_CONFIG;
5652 mcp->mb[1] = led_cfg[0];
5653 mcp->mb[2] = led_cfg[1];
5654 if (IS_QLA8031(ha)) {
5655 mcp->mb[3] = led_cfg[2];
5656 mcp->mb[4] = led_cfg[3];
5657 mcp->mb[5] = led_cfg[4];
5658 mcp->mb[6] = led_cfg[5];
5661 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5662 if (IS_QLA8031(ha))
5663 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5664 mcp->in_mb = MBX_0;
5665 mcp->tov = 30;
5666 mcp->flags = 0;
5668 rval = qla2x00_mailbox_command(vha, mcp);
5669 if (rval != QLA_SUCCESS) {
5670 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5671 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5672 } else {
5673 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5674 "Done %s.\n", __func__);
5677 return rval;
5681 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5683 int rval;
5684 struct qla_hw_data *ha = vha->hw;
5685 mbx_cmd_t mc;
5686 mbx_cmd_t *mcp = &mc;
5688 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5689 return QLA_FUNCTION_FAILED;
5691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5692 "Entered %s.\n", __func__);
5694 memset(mcp, 0, sizeof(mbx_cmd_t));
5695 mcp->mb[0] = MBC_GET_LED_CONFIG;
5697 mcp->out_mb = MBX_0;
5698 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5699 if (IS_QLA8031(ha))
5700 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5701 mcp->tov = 30;
5702 mcp->flags = 0;
5704 rval = qla2x00_mailbox_command(vha, mcp);
5705 if (rval != QLA_SUCCESS) {
5706 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5707 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5708 } else {
5709 led_cfg[0] = mcp->mb[1];
5710 led_cfg[1] = mcp->mb[2];
5711 if (IS_QLA8031(ha)) {
5712 led_cfg[2] = mcp->mb[3];
5713 led_cfg[3] = mcp->mb[4];
5714 led_cfg[4] = mcp->mb[5];
5715 led_cfg[5] = mcp->mb[6];
5717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5718 "Done %s.\n", __func__);
5721 return rval;
5725 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5727 int rval;
5728 struct qla_hw_data *ha = vha->hw;
5729 mbx_cmd_t mc;
5730 mbx_cmd_t *mcp = &mc;
5732 if (!IS_P3P_TYPE(ha))
5733 return QLA_FUNCTION_FAILED;
5735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5736 "Entered %s.\n", __func__);
5738 memset(mcp, 0, sizeof(mbx_cmd_t));
5739 mcp->mb[0] = MBC_SET_LED_CONFIG;
5740 if (enable)
5741 mcp->mb[7] = 0xE;
5742 else
5743 mcp->mb[7] = 0xD;
5745 mcp->out_mb = MBX_7|MBX_0;
5746 mcp->in_mb = MBX_0;
5747 mcp->tov = MBX_TOV_SECONDS;
5748 mcp->flags = 0;
5750 rval = qla2x00_mailbox_command(vha, mcp);
5751 if (rval != QLA_SUCCESS) {
5752 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5753 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5754 } else {
5755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5756 "Done %s.\n", __func__);
5759 return rval;
5763 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5765 int rval;
5766 struct qla_hw_data *ha = vha->hw;
5767 mbx_cmd_t mc;
5768 mbx_cmd_t *mcp = &mc;
5770 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5771 return QLA_FUNCTION_FAILED;
5773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5774 "Entered %s.\n", __func__);
5776 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5777 mcp->mb[1] = LSW(reg);
5778 mcp->mb[2] = MSW(reg);
5779 mcp->mb[3] = LSW(data);
5780 mcp->mb[4] = MSW(data);
5781 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5783 mcp->in_mb = MBX_1|MBX_0;
5784 mcp->tov = MBX_TOV_SECONDS;
5785 mcp->flags = 0;
5786 rval = qla2x00_mailbox_command(vha, mcp);
5788 if (rval != QLA_SUCCESS) {
5789 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5790 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5791 } else {
5792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5793 "Done %s.\n", __func__);
5796 return rval;
5800 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5802 int rval;
5803 struct qla_hw_data *ha = vha->hw;
5804 mbx_cmd_t mc;
5805 mbx_cmd_t *mcp = &mc;
5807 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5808 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5809 "Implicit LOGO Unsupported.\n");
5810 return QLA_FUNCTION_FAILED;
5814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5815 "Entering %s.\n", __func__);
5817 /* Perform Implicit LOGO. */
5818 mcp->mb[0] = MBC_PORT_LOGOUT;
5819 mcp->mb[1] = fcport->loop_id;
5820 mcp->mb[10] = BIT_15;
5821 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5822 mcp->in_mb = MBX_0;
5823 mcp->tov = MBX_TOV_SECONDS;
5824 mcp->flags = 0;
5825 rval = qla2x00_mailbox_command(vha, mcp);
5826 if (rval != QLA_SUCCESS)
5827 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5828 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5829 else
5830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5831 "Done %s.\n", __func__);
5833 return rval;
5837 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5839 int rval;
5840 mbx_cmd_t mc;
5841 mbx_cmd_t *mcp = &mc;
5842 struct qla_hw_data *ha = vha->hw;
5843 unsigned long retry_max_time = jiffies + (2 * HZ);
5845 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5846 return QLA_FUNCTION_FAILED;
5848 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5850 retry_rd_reg:
5851 mcp->mb[0] = MBC_READ_REMOTE_REG;
5852 mcp->mb[1] = LSW(reg);
5853 mcp->mb[2] = MSW(reg);
5854 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5855 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5856 mcp->tov = MBX_TOV_SECONDS;
5857 mcp->flags = 0;
5858 rval = qla2x00_mailbox_command(vha, mcp);
5860 if (rval != QLA_SUCCESS) {
5861 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5862 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5863 rval, mcp->mb[0], mcp->mb[1]);
5864 } else {
5865 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5866 if (*data == QLA8XXX_BAD_VALUE) {
5867 /*
5868 * During soft-reset CAMRAM register reads might
5869 * return 0xbad0bad0. So retry for MAX of 2 sec
5870 * while reading camram registers.
5872 if (time_after(jiffies, retry_max_time)) {
5873 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5874 "Failure to read CAMRAM register. "
5875 "data=0x%x.\n", *data);
5876 return QLA_FUNCTION_FAILED;
5877 }
5878 msleep(100);
5879 goto retry_rd_reg;
5880 }
5881 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5884 return rval;
5888 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5890 int rval;
5891 mbx_cmd_t mc;
5892 mbx_cmd_t *mcp = &mc;
5893 struct qla_hw_data *ha = vha->hw;
5895 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5896 return QLA_FUNCTION_FAILED;
5898 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5900 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5901 mcp->out_mb = MBX_0;
5902 mcp->in_mb = MBX_1|MBX_0;
5903 mcp->tov = MBX_TOV_SECONDS;
5904 mcp->flags = 0;
5905 rval = qla2x00_mailbox_command(vha, mcp);
5907 if (rval != QLA_SUCCESS) {
5908 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5909 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5910 rval, mcp->mb[0], mcp->mb[1]);
5911 ha->isp_ops->fw_dump(vha, 0);
5912 } else {
5913 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5916 return rval;
5920 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5921 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5923 int rval;
5924 mbx_cmd_t mc;
5925 mbx_cmd_t *mcp = &mc;
5926 uint8_t subcode = (uint8_t)options;
5927 struct qla_hw_data *ha = vha->hw;
5929 if (!IS_QLA8031(ha))
5930 return QLA_FUNCTION_FAILED;
5932 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5934 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5935 mcp->mb[1] = options;
5936 mcp->out_mb = MBX_1|MBX_0;
5937 if (subcode & BIT_2) {
5938 mcp->mb[2] = LSW(start_addr);
5939 mcp->mb[3] = MSW(start_addr);
5940 mcp->mb[4] = LSW(end_addr);
5941 mcp->mb[5] = MSW(end_addr);
5942 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5944 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5945 if (!(subcode & (BIT_2 | BIT_5)))
5946 mcp->in_mb |= MBX_4|MBX_3;
5947 mcp->tov = MBX_TOV_SECONDS;
5948 mcp->flags = 0;
5949 rval = qla2x00_mailbox_command(vha, mcp);
5951 if (rval != QLA_SUCCESS) {
5952 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5953 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5954 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5955 mcp->mb[4]);
5956 ha->isp_ops->fw_dump(vha, 0);
5957 } else {
5958 if (subcode & BIT_5)
5959 *sector_size = mcp->mb[1];
5960 else if (subcode & (BIT_6 | BIT_7)) {
5961 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5962 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5963 } else if (subcode & (BIT_3 | BIT_4)) {
5964 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5965 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5967 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5970 return rval;
5974 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5975 uint32_t size)
5977 int rval;
5978 mbx_cmd_t mc;
5979 mbx_cmd_t *mcp = &mc;
5981 if (!IS_MCTP_CAPABLE(vha->hw))
5982 return QLA_FUNCTION_FAILED;
5984 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5985 "Entered %s.\n", __func__);
5987 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5988 mcp->mb[1] = LSW(addr);
5989 mcp->mb[2] = MSW(req_dma);
5990 mcp->mb[3] = LSW(req_dma);
5991 mcp->mb[4] = MSW(size);
5992 mcp->mb[5] = LSW(size);
5993 mcp->mb[6] = MSW(MSD(req_dma));
5994 mcp->mb[7] = LSW(MSD(req_dma));
5995 mcp->mb[8] = MSW(addr);
5996 /* Set the RAM ID valid bit; for MCTP the RAM ID is 0x40. */
5998 mcp->mb[10] = BIT_7 | 0x40;
6000 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6001 MBX_0;
6003 mcp->in_mb = MBX_0;
6004 mcp->tov = MBX_TOV_SECONDS;
6005 mcp->flags = 0;
6006 rval = qla2x00_mailbox_command(vha, mcp);
6008 if (rval != QLA_SUCCESS) {
6009 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6010 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6011 } else {
6012 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6013 "Done %s.\n", __func__);
6016 return rval;
6020 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6021 void *dd_buf, uint size, uint options)
6023 int rval;
6024 mbx_cmd_t mc;
6025 mbx_cmd_t *mcp = &mc;
6026 dma_addr_t dd_dma;
6028 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
6029 return QLA_FUNCTION_FAILED;
6031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6032 "Entered %s.\n", __func__);
6034 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6035 dd_buf, size, DMA_FROM_DEVICE);
6036 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6037 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6038 return QLA_MEMORY_ALLOC_FAILED;
6041 memset(dd_buf, 0, size);
6043 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6044 mcp->mb[1] = options;
6045 mcp->mb[2] = MSW(LSD(dd_dma));
6046 mcp->mb[3] = LSW(LSD(dd_dma));
6047 mcp->mb[6] = MSW(MSD(dd_dma));
6048 mcp->mb[7] = LSW(MSD(dd_dma));
6049 mcp->mb[8] = size;
6050 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6051 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6052 mcp->buf_size = size;
6053 mcp->flags = MBX_DMA_IN;
6054 mcp->tov = MBX_TOV_SECONDS * 4;
6055 rval = qla2x00_mailbox_command(vha, mcp);
6057 if (rval != QLA_SUCCESS) {
6058 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6059 } else {
6060 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6061 "Done %s.\n", __func__);
6064 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6065 size, DMA_FROM_DEVICE);
6067 return rval;
6068 }
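/*
 * dd_buf is streaming-mapped with dma_map_single(), so callers pass an
 * ordinary kernel buffer rather than a coherent allocation.  A hedged
 * caller sketch (buffer size and options value are illustrative):
 *
 *	u8 *buf = kzalloc(1024, GFP_KERNEL);
 *
 *	if (buf && qla26xx_dport_diagnostics(vha, buf, 1024, 0) ==
 *	    QLA_SUCCESS)
 *		(consume the diagnostic data, then free the buffer)
 */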
6070 static void qla2x00_async_mb_sp_done(void *s, int res)
6072 struct srb *sp = s;
6074 sp->u.iocb_cmd.u.mbx.rc = res;
6076 complete(&sp->u.iocb_cmd.u.mbx.comp);
6077 /* don't free sp here. Let the caller do the free */
6078 }
6080 /*
6081 * This mailbox uses the IOCB interface to send a MB command.
6082 * This allows non-critical (non chip-setup) commands to go
6083 * out in parallel.
6085 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6087 int rval = QLA_FUNCTION_FAILED;
6088 srb_t *sp;
6089 struct srb_iocb *c;
6091 if (!vha->hw->flags.fw_started)
6092 goto done;
6094 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6095 if (!sp)
6096 goto done;
6098 sp->type = SRB_MB_IOCB;
6099 sp->name = mb_to_str(mcp->mb[0]);
6101 c = &sp->u.iocb_cmd;
6102 c->timeout = qla2x00_async_iocb_timeout;
6103 init_completion(&c->u.mbx.comp);
6105 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6107 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6109 sp->done = qla2x00_async_mb_sp_done;
6111 rval = qla2x00_start_sp(sp);
6112 if (rval != QLA_SUCCESS) {
6113 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6114 "%s: %s Failed submission. %x.\n",
6115 __func__, sp->name, rval);
6116 goto done_free_sp;
6119 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6120 sp->name, sp->handle);
6122 wait_for_completion(&c->u.mbx.comp);
6123 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6125 rval = c->u.mbx.rc;
6126 switch (rval) {
6127 case QLA_FUNCTION_TIMEOUT:
6128 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6129 __func__, sp->name, rval);
6130 break;
6131 case QLA_SUCCESS:
6132 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6133 __func__, sp->name);
6134 break;
6135 default:
6136 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6137 __func__, sp->name, rval);
6138 break;
6141 done_free_sp:
6142 sp->free(sp);
6143 done:
6144 return rval;
6145 }
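/*
 * qla24xx_send_mb_cmd() lets process-context callers issue a mailbox
 * command as an SRB_MB_IOCB and sleep on a completion instead of holding
 * the synchronous mailbox path; qla24xx_gpdb_wait() and
 * qla24xx_gidlist_wait() below are the in-file examples of filling a
 * mbx_cmd_t and handing it off this way.
 */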
6147 /*
6148 * qla24xx_gpdb_wait
6149 * NOTE: Do not call this routine from DPC thread
6151 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6153 int rval = QLA_FUNCTION_FAILED;
6154 dma_addr_t pd_dma;
6155 struct port_database_24xx *pd;
6156 struct qla_hw_data *ha = vha->hw;
6157 mbx_cmd_t mc;
6159 if (!vha->hw->flags.fw_started)
6160 goto done;
6162 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6163 if (pd == NULL) {
6164 ql_log(ql_log_warn, vha, 0xd047,
6165 "Failed to allocate port database structure.\n");
6166 goto done_free_sp;
6169 memset(&mc, 0, sizeof(mc));
6170 mc.mb[0] = MBC_GET_PORT_DATABASE;
6171 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6172 mc.mb[2] = MSW(pd_dma);
6173 mc.mb[3] = LSW(pd_dma);
6174 mc.mb[6] = MSW(MSD(pd_dma));
6175 mc.mb[7] = LSW(MSD(pd_dma));
6176 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6177 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6179 rval = qla24xx_send_mb_cmd(vha, &mc);
6180 if (rval != QLA_SUCCESS) {
6181 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6182 "%s: %8phC fail\n", __func__, fcport->port_name);
6183 goto done_free_sp;
6186 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6188 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6189 __func__, fcport->port_name);
6191 done_free_sp:
6192 if (pd)
6193 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6194 done:
6195 return rval;
6198 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6199 struct port_database_24xx *pd)
6201 int rval = QLA_SUCCESS;
6202 uint64_t zero = 0;
6203 u8 current_login_state, last_login_state;
6205 if (fcport->fc4f_nvme) {
6206 current_login_state = pd->current_login_state >> 4;
6207 last_login_state = pd->last_login_state >> 4;
6208 } else {
6209 current_login_state = pd->current_login_state & 0xf;
6210 last_login_state = pd->last_login_state & 0xf;
6213 /* Check for logged in state. */
6214 if (current_login_state != PDS_PRLI_COMPLETE) {
6215 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6216 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6217 current_login_state, last_login_state, fcport->loop_id);
6218 rval = QLA_FUNCTION_FAILED;
6219 goto gpd_error_out;
6222 if (fcport->loop_id == FC_NO_LOOP_ID ||
6223 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6224 memcmp(fcport->port_name, pd->port_name, 8))) {
6225 /* We lost the device mid way. */
6226 rval = QLA_NOT_LOGGED_IN;
6227 goto gpd_error_out;
6230 /* Names are little-endian. */
6231 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6232 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6234 /* Get port_id of device. */
6235 fcport->d_id.b.domain = pd->port_id[0];
6236 fcport->d_id.b.area = pd->port_id[1];
6237 fcport->d_id.b.al_pa = pd->port_id[2];
6238 fcport->d_id.b.rsvd_1 = 0;
6240 if (fcport->fc4f_nvme) {
6241 fcport->nvme_prli_service_param =
6242 pd->prli_nvme_svc_param_word_3;
6243 fcport->port_type = FCT_NVME;
6244 } else {
6245 /* If not target must be initiator or unknown type. */
6246 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6247 fcport->port_type = FCT_INITIATOR;
6248 else
6249 fcport->port_type = FCT_TARGET;
6251 /* Passback COS information. */
6252 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6253 FC_COS_CLASS2 : FC_COS_CLASS3;
6255 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6256 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6257 fcport->conf_compl_supported = 1;
6260 gpd_error_out:
6261 return rval;
6262 }
6264 /*
6265 * qla24xx_gidlist_wait
6266 * NOTE: don't call this routine from DPC thread.
6268 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6269 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6271 int rval = QLA_FUNCTION_FAILED;
6272 mbx_cmd_t mc;
6274 if (!vha->hw->flags.fw_started)
6275 goto done;
6277 memset(&mc, 0, sizeof(mc));
6278 mc.mb[0] = MBC_GET_ID_LIST;
6279 mc.mb[2] = MSW(id_list_dma);
6280 mc.mb[3] = LSW(id_list_dma);
6281 mc.mb[6] = MSW(MSD(id_list_dma));
6282 mc.mb[7] = LSW(MSD(id_list_dma));
6283 mc.mb[8] = 0;
6284 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6286 rval = qla24xx_send_mb_cmd(vha, &mc);
6287 if (rval != QLA_SUCCESS) {
6288 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6289 "%s: fail\n", __func__);
6290 } else {
6291 *entries = mc.mb[1];
6292 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6293 "%s: done\n", __func__);
6295 done:
6296 return rval;
6299 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6301 int rval;
6302 mbx_cmd_t mc;
6303 mbx_cmd_t *mcp = &mc;
6305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6306 "Entered %s\n", __func__);
6308 memset(mcp->mb, 0 , sizeof(mcp->mb));
6309 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6310 mcp->mb[1] = cpu_to_le16(1);
6311 mcp->mb[2] = cpu_to_le16(value);
6312 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6313 mcp->in_mb = MBX_2 | MBX_0;
6314 mcp->tov = MBX_TOV_SECONDS;
6315 mcp->flags = 0;
6317 rval = qla2x00_mailbox_command(vha, mcp);
6319 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6320 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6322 return rval;
6325 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6327 int rval;
6328 mbx_cmd_t mc;
6329 mbx_cmd_t *mcp = &mc;
6331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6332 "Entered %s\n", __func__);
6334 memset(mcp->mb, 0, sizeof(mcp->mb));
6335 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6336 mcp->mb[1] = cpu_to_le16(0);
6337 mcp->out_mb = MBX_1 | MBX_0;
6338 mcp->in_mb = MBX_2 | MBX_0;
6339 mcp->tov = MBX_TOV_SECONDS;
6340 mcp->flags = 0;
6342 rval = qla2x00_mailbox_command(vha, mcp);
6343 if (rval == QLA_SUCCESS)
6344 *value = mc.mb[2];
6346 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6347 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6349 return rval;
6350 }
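/*
 * The set/get pair above shares MBC_GET_SET_ZIO_THRESHOLD: mb[1] selects
 * the operation (1 = set, 0 = get) and mb[2] carries the threshold value.
 * A hedged round-trip sketch, with new_threshold illustrative:
 *
 *	uint16_t cur;
 *
 *	if (qla27xx_get_zio_threshold(vha, &cur) == QLA_SUCCESS &&
 *	    cur != new_threshold)
 *		qla27xx_set_zio_threshold(vha, new_threshold);
 */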
6353 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6355 struct qla_hw_data *ha = vha->hw;
6356 uint16_t iter, addr, offset;
6357 dma_addr_t phys_addr;
6358 int rval, c;
6359 u8 *sfp_data;
6361 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6362 addr = 0xa0;
6363 phys_addr = ha->sfp_data_dma;
6364 sfp_data = ha->sfp_data;
6365 offset = c = 0;
6367 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6368 if (iter == 4) {
6369 /* Skip to next device address. */
6370 addr = 0xa2;
6371 offset = 0;
6374 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6375 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6376 if (rval != QLA_SUCCESS) {
6377 ql_log(ql_log_warn, vha, 0x706d,
6378 "Unable to read SFP data (%x/%x/%x).\n", rval,
6379 addr, offset);
6381 return rval;
6384 if (buf && (c < count)) {
6385 u16 sz;
6387 if ((count - c) >= SFP_BLOCK_SIZE)
6388 sz = SFP_BLOCK_SIZE;
6389 else
6390 sz = count - c;
6392 memcpy(buf, sfp_data, sz);
6393 buf += SFP_BLOCK_SIZE;
6394 c += sz;
6396 phys_addr += SFP_BLOCK_SIZE;
6397 sfp_data += SFP_BLOCK_SIZE;
6398 offset += SFP_BLOCK_SIZE;
6401 return rval;
6404 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6405 uint16_t *out_mb, int out_mb_sz)
6407 int rval = QLA_FUNCTION_FAILED;
6408 mbx_cmd_t mc;
6410 if (!vha->hw->flags.fw_started)
6411 goto done;
6413 memset(&mc, 0, sizeof(mc));
6414 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6416 rval = qla24xx_send_mb_cmd(vha, &mc);
6417 if (rval != QLA_SUCCESS) {
6418 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6419 "%s: fail\n", __func__);
6420 } else {
6421 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6422 memcpy(out_mb, mc.mb, out_mb_sz);
6423 else
6424 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6426 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6427 "%s: done\n", __func__);
6429 done:
6430 return rval;