WIP FPC-III support
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_mbx.c
blobd7d4ab65009c48c2cb97544672e3f1e0e69b1ab3
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
6 #include "qla_def.h"
7 #include "qla_target.h"
9 #include <linux/delay.h>
10 #include <linux/gfp.h>
12 static struct mb_cmd_name {
13 uint16_t cmd;
14 const char *str;
15 } mb_str[] = {
16 {MBC_GET_PORT_DATABASE, "GPDB"},
17 {MBC_GET_ID_LIST, "GIDList"},
18 {MBC_GET_LINK_PRIV_STATS, "Stats"},
19 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
22 static const char *mb_to_str(uint16_t cmd)
24 int i;
25 struct mb_cmd_name *e;
27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
28 e = mb_str + i;
29 if (cmd == e->cmd)
30 return e->str;
32 return "unknown";
35 static struct rom_cmd {
36 uint16_t cmd;
37 } rom_cmds[] = {
38 { MBC_LOAD_RAM },
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
57 { MBC_INITIALIZE_MULTIQ },
58 { MBC_IOCB_COMMAND_A64 },
59 { MBC_GET_ADAPTER_LOOP_ID },
60 { MBC_READ_SFP },
61 { MBC_SET_RNID_PARAMS },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
66 static int is_rom_cmd(uint16_t cmd)
68 int i;
69 struct rom_cmd *wc;
71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 wc = rom_cmds + i;
73 if (wc->cmd == cmd)
74 return 1;
77 return 0;
81 * qla2x00_mailbox_command
82 * Issue mailbox command and waits for completion.
84 * Input:
85 * ha = adapter block pointer.
86 * mcp = driver internal mbx struct pointer.
88 * Output:
89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
91 * Returns:
92 * 0 : QLA_SUCCESS = cmd performed success
93 * 1 : QLA_FUNCTION_FAILED (error encountered)
94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
96 * Context:
97 * Kernel context.
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
102 int rval, i;
103 unsigned long flags = 0;
104 device_reg_t *reg;
105 uint8_t abort_active;
106 uint8_t io_lock_on;
107 uint16_t command = 0;
108 uint16_t *iptr;
109 __le16 __iomem *optr;
110 uint32_t cnt;
111 uint32_t mboxes;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 u32 chip_reset;
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
120 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "PCI channel failed permanently, exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
126 if (vha->device_flags & DFLG_DEV_FAILED) {
127 ql_log(ql_log_warn, vha, 0x1002,
128 "Device in failed state, exiting.\n");
129 return QLA_FUNCTION_TIMEOUT;
132 /* if PCI error, then avoid mbx processing.*/
133 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
134 test_bit(UNLOADING, &base_vha->dpc_flags)) {
135 ql_log(ql_log_warn, vha, 0xd04e,
136 "PCI error, exiting.\n");
137 return QLA_FUNCTION_TIMEOUT;
140 reg = ha->iobase;
141 io_lock_on = base_vha->flags.init_done;
143 rval = QLA_SUCCESS;
144 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 chip_reset = ha->chip_reset;
147 if (ha->flags.pci_channel_io_perm_failure) {
148 ql_log(ql_log_warn, vha, 0x1003,
149 "Perm failure on EEH timeout MBX, exiting.\n");
150 return QLA_FUNCTION_TIMEOUT;
153 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
154 /* Setting Link-Down error */
155 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
156 ql_log(ql_log_warn, vha, 0x1004,
157 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
158 return QLA_FUNCTION_TIMEOUT;
161 /* check if ISP abort is active and return cmd with timeout */
162 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
163 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
165 !is_rom_cmd(mcp->mb[0])) {
166 ql_log(ql_log_info, vha, 0x1005,
167 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
168 mcp->mb[0]);
169 return QLA_FUNCTION_TIMEOUT;
172 atomic_inc(&ha->num_pend_mbx_stage1);
174 * Wait for active mailbox commands to finish by waiting at most tov
175 * seconds. This is to serialize actual issuing of mailbox cmds during
176 * non ISP abort time.
178 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
179 /* Timeout occurred. Return error. */
180 ql_log(ql_log_warn, vha, 0xd035,
181 "Cmd access timeout, cmd=0x%x, Exiting.\n",
182 mcp->mb[0]);
183 atomic_dec(&ha->num_pend_mbx_stage1);
184 return QLA_FUNCTION_TIMEOUT;
186 atomic_dec(&ha->num_pend_mbx_stage1);
187 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
188 rval = QLA_ABORTED;
189 goto premature_exit;
193 /* Save mailbox command for debug */
194 ha->mcp = mcp;
196 ql_dbg(ql_dbg_mbx, vha, 0x1006,
197 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
199 spin_lock_irqsave(&ha->hardware_lock, flags);
201 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
202 ha->flags.mbox_busy) {
203 rval = QLA_ABORTED;
204 spin_unlock_irqrestore(&ha->hardware_lock, flags);
205 goto premature_exit;
207 ha->flags.mbox_busy = 1;
209 /* Load mailbox registers. */
210 if (IS_P3P_TYPE(ha))
211 optr = &reg->isp82.mailbox_in[0];
212 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
213 optr = &reg->isp24.mailbox0;
214 else
215 optr = MAILBOX_REG(ha, &reg->isp, 0);
217 iptr = mcp->mb;
218 command = mcp->mb[0];
219 mboxes = mcp->out_mb;
221 ql_dbg(ql_dbg_mbx, vha, 0x1111,
222 "Mailbox registers (OUT):\n");
223 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
224 if (IS_QLA2200(ha) && cnt == 8)
225 optr = MAILBOX_REG(ha, &reg->isp, 8);
226 if (mboxes & BIT_0) {
227 ql_dbg(ql_dbg_mbx, vha, 0x1112,
228 "mbox[%d]<-0x%04x\n", cnt, *iptr);
229 wrt_reg_word(optr, *iptr);
232 mboxes >>= 1;
233 optr++;
234 iptr++;
237 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
238 "I/O Address = %p.\n", optr);
240 /* Issue set host interrupt command to send cmd out. */
241 ha->flags.mbox_int = 0;
242 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
244 /* Unlock mbx registers and wait for interrupt */
245 ql_dbg(ql_dbg_mbx, vha, 0x100f,
246 "Going to unlock irq & waiting for interrupts. "
247 "jiffies=%lx.\n", jiffies);
249 /* Wait for mbx cmd completion until timeout */
250 atomic_inc(&ha->num_pend_mbx_stage2);
251 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
252 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
254 if (IS_P3P_TYPE(ha))
255 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
256 else if (IS_FWI2_CAPABLE(ha))
257 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
258 else
259 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
260 spin_unlock_irqrestore(&ha->hardware_lock, flags);
262 wait_time = jiffies;
263 atomic_inc(&ha->num_pend_mbx_stage3);
264 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
265 mcp->tov * HZ)) {
266 if (chip_reset != ha->chip_reset) {
267 spin_lock_irqsave(&ha->hardware_lock, flags);
268 ha->flags.mbox_busy = 0;
269 spin_unlock_irqrestore(&ha->hardware_lock,
270 flags);
271 atomic_dec(&ha->num_pend_mbx_stage2);
272 atomic_dec(&ha->num_pend_mbx_stage3);
273 rval = QLA_ABORTED;
274 goto premature_exit;
276 ql_dbg(ql_dbg_mbx, vha, 0x117a,
277 "cmd=%x Timeout.\n", command);
278 spin_lock_irqsave(&ha->hardware_lock, flags);
279 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
280 spin_unlock_irqrestore(&ha->hardware_lock, flags);
282 } else if (ha->flags.purge_mbox ||
283 chip_reset != ha->chip_reset) {
284 spin_lock_irqsave(&ha->hardware_lock, flags);
285 ha->flags.mbox_busy = 0;
286 spin_unlock_irqrestore(&ha->hardware_lock, flags);
287 atomic_dec(&ha->num_pend_mbx_stage2);
288 atomic_dec(&ha->num_pend_mbx_stage3);
289 rval = QLA_ABORTED;
290 goto premature_exit;
292 atomic_dec(&ha->num_pend_mbx_stage3);
294 if (time_after(jiffies, wait_time + 5 * HZ))
295 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
296 command, jiffies_to_msecs(jiffies - wait_time));
297 } else {
298 ql_dbg(ql_dbg_mbx, vha, 0x1011,
299 "Cmd=%x Polling Mode.\n", command);
301 if (IS_P3P_TYPE(ha)) {
302 if (rd_reg_dword(&reg->isp82.hint) &
303 HINT_MBX_INT_PENDING) {
304 ha->flags.mbox_busy = 0;
305 spin_unlock_irqrestore(&ha->hardware_lock,
306 flags);
307 atomic_dec(&ha->num_pend_mbx_stage2);
308 ql_dbg(ql_dbg_mbx, vha, 0x1012,
309 "Pending mailbox timeout, exiting.\n");
310 rval = QLA_FUNCTION_TIMEOUT;
311 goto premature_exit;
313 wrt_reg_dword(&reg->isp82.hint, HINT_MBX_INT_PENDING);
314 } else if (IS_FWI2_CAPABLE(ha))
315 wrt_reg_dword(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
316 else
317 wrt_reg_word(&reg->isp.hccr, HCCR_SET_HOST_INT);
318 spin_unlock_irqrestore(&ha->hardware_lock, flags);
320 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
321 while (!ha->flags.mbox_int) {
322 if (ha->flags.purge_mbox ||
323 chip_reset != ha->chip_reset) {
324 spin_lock_irqsave(&ha->hardware_lock, flags);
325 ha->flags.mbox_busy = 0;
326 spin_unlock_irqrestore(&ha->hardware_lock,
327 flags);
328 atomic_dec(&ha->num_pend_mbx_stage2);
329 rval = QLA_ABORTED;
330 goto premature_exit;
333 if (time_after(jiffies, wait_time))
334 break;
336 /* Check for pending interrupts. */
337 qla2x00_poll(ha->rsp_q_map[0]);
339 if (!ha->flags.mbox_int &&
340 !(IS_QLA2200(ha) &&
341 command == MBC_LOAD_RISC_RAM_EXTENDED))
342 msleep(10);
343 } /* while */
344 ql_dbg(ql_dbg_mbx, vha, 0x1013,
345 "Waited %d sec.\n",
346 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
348 atomic_dec(&ha->num_pend_mbx_stage2);
350 /* Check whether we timed out */
351 if (ha->flags.mbox_int) {
352 uint16_t *iptr2;
354 ql_dbg(ql_dbg_mbx, vha, 0x1014,
355 "Cmd=%x completed.\n", command);
357 /* Got interrupt. Clear the flag. */
358 ha->flags.mbox_int = 0;
359 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
361 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
362 spin_lock_irqsave(&ha->hardware_lock, flags);
363 ha->flags.mbox_busy = 0;
364 spin_unlock_irqrestore(&ha->hardware_lock, flags);
366 /* Setting Link-Down error */
367 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
368 ha->mcp = NULL;
369 rval = QLA_FUNCTION_FAILED;
370 ql_log(ql_log_warn, vha, 0xd048,
371 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
372 goto premature_exit;
375 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
376 ql_dbg(ql_dbg_mbx, vha, 0x11ff,
377 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
378 MBS_COMMAND_COMPLETE);
379 rval = QLA_FUNCTION_FAILED;
382 /* Load return mailbox registers. */
383 iptr2 = mcp->mb;
384 iptr = (uint16_t *)&ha->mailbox_out[0];
385 mboxes = mcp->in_mb;
387 ql_dbg(ql_dbg_mbx, vha, 0x1113,
388 "Mailbox registers (IN):\n");
389 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
390 if (mboxes & BIT_0) {
391 *iptr2 = *iptr;
392 ql_dbg(ql_dbg_mbx, vha, 0x1114,
393 "mbox[%d]->0x%04x\n", cnt, *iptr2);
396 mboxes >>= 1;
397 iptr2++;
398 iptr++;
400 } else {
402 uint16_t mb[8];
403 uint32_t ictrl, host_status, hccr;
404 uint16_t w;
406 if (IS_FWI2_CAPABLE(ha)) {
407 mb[0] = rd_reg_word(&reg->isp24.mailbox0);
408 mb[1] = rd_reg_word(&reg->isp24.mailbox1);
409 mb[2] = rd_reg_word(&reg->isp24.mailbox2);
410 mb[3] = rd_reg_word(&reg->isp24.mailbox3);
411 mb[7] = rd_reg_word(&reg->isp24.mailbox7);
412 ictrl = rd_reg_dword(&reg->isp24.ictrl);
413 host_status = rd_reg_dword(&reg->isp24.host_status);
414 hccr = rd_reg_dword(&reg->isp24.hccr);
416 ql_log(ql_log_warn, vha, 0xd04c,
417 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
418 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
419 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
420 mb[7], host_status, hccr);
422 } else {
423 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
424 ictrl = rd_reg_word(&reg->isp.ictrl);
425 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
426 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
427 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
429 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
431 /* Capture FW dump only, if PCI device active */
432 if (!pci_channel_offline(vha->hw->pdev)) {
433 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
434 if (w == 0xffff || ictrl == 0xffffffff ||
435 (chip_reset != ha->chip_reset)) {
436 /* This is special case if there is unload
437 * of driver happening and if PCI device go
438 * into bad state due to PCI error condition
439 * then only PCI ERR flag would be set.
440 * we will do premature exit for above case.
442 spin_lock_irqsave(&ha->hardware_lock, flags);
443 ha->flags.mbox_busy = 0;
444 spin_unlock_irqrestore(&ha->hardware_lock,
445 flags);
446 rval = QLA_FUNCTION_TIMEOUT;
447 goto premature_exit;
450 /* Attempt to capture firmware dump for further
451 * anallysis of the current formware state. we do not
452 * need to do this if we are intentionally generating
453 * a dump
455 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
456 qla2xxx_dump_fw(vha);
457 rval = QLA_FUNCTION_TIMEOUT;
460 spin_lock_irqsave(&ha->hardware_lock, flags);
461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(&ha->hardware_lock, flags);
464 /* Clean up */
465 ha->mcp = NULL;
467 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
468 ql_dbg(ql_dbg_mbx, vha, 0x101a,
469 "Checking for additional resp interrupt.\n");
471 /* polling mode for non isp_abort commands. */
472 qla2x00_poll(ha->rsp_q_map[0]);
475 if (rval == QLA_FUNCTION_TIMEOUT &&
476 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
477 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
478 ha->flags.eeh_busy) {
479 /* not in dpc. schedule it for dpc to take over. */
480 ql_dbg(ql_dbg_mbx, vha, 0x101b,
481 "Timeout, schedule isp_abort_needed.\n");
483 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
484 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
485 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
486 if (IS_QLA82XX(ha)) {
487 ql_dbg(ql_dbg_mbx, vha, 0x112a,
488 "disabling pause transmit on port "
489 "0 & 1.\n");
490 qla82xx_wr_32(ha,
491 QLA82XX_CRB_NIU + 0x98,
492 CRB_NIU_XG_PAUSE_CTL_P0|
493 CRB_NIU_XG_PAUSE_CTL_P1);
495 ql_log(ql_log_info, base_vha, 0x101c,
496 "Mailbox cmd timeout occurred, cmd=0x%x, "
497 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
498 "abort.\n", command, mcp->mb[0],
499 ha->flags.eeh_busy);
500 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
501 qla2xxx_wake_dpc(vha);
503 } else if (current == ha->dpc_thread) {
504 /* call abort directly since we are in the DPC thread */
505 ql_dbg(ql_dbg_mbx, vha, 0x101d,
506 "Timeout, calling abort_isp.\n");
508 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
509 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
510 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
511 if (IS_QLA82XX(ha)) {
512 ql_dbg(ql_dbg_mbx, vha, 0x112b,
513 "disabling pause transmit on port "
514 "0 & 1.\n");
515 qla82xx_wr_32(ha,
516 QLA82XX_CRB_NIU + 0x98,
517 CRB_NIU_XG_PAUSE_CTL_P0|
518 CRB_NIU_XG_PAUSE_CTL_P1);
520 ql_log(ql_log_info, base_vha, 0x101e,
521 "Mailbox cmd timeout occurred, cmd=0x%x, "
522 "mb[0]=0x%x. Scheduling ISP abort ",
523 command, mcp->mb[0]);
524 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
525 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
526 /* Allow next mbx cmd to come in. */
527 complete(&ha->mbx_cmd_comp);
528 if (ha->isp_ops->abort_isp(vha)) {
529 /* Failed. retry later. */
530 set_bit(ISP_ABORT_NEEDED,
531 &vha->dpc_flags);
533 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
534 ql_dbg(ql_dbg_mbx, vha, 0x101f,
535 "Finished abort_isp.\n");
536 goto mbx_done;
541 premature_exit:
542 /* Allow next mbx cmd to come in. */
543 complete(&ha->mbx_cmd_comp);
545 mbx_done:
546 if (rval == QLA_ABORTED) {
547 ql_log(ql_log_info, vha, 0xd035,
548 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
549 mcp->mb[0]);
550 } else if (rval) {
551 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
552 pr_warn("%s [%s]-%04x:%ld: **** Failed=%x", QL_MSGHDR,
553 dev_name(&ha->pdev->dev), 0x1020+0x800,
554 vha->host_no, rval);
555 mboxes = mcp->in_mb;
556 cnt = 4;
557 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
558 if (mboxes & BIT_0) {
559 printk(" mb[%u]=%x", i, mcp->mb[i]);
560 cnt--;
562 pr_warn(" cmd=%x ****\n", command);
564 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
565 ql_dbg(ql_dbg_mbx, vha, 0x1198,
566 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
567 rd_reg_dword(&reg->isp24.host_status),
568 rd_reg_dword(&reg->isp24.ictrl),
569 rd_reg_dword(&reg->isp24.istatus));
570 } else {
571 ql_dbg(ql_dbg_mbx, vha, 0x1206,
572 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
573 rd_reg_word(&reg->isp.ctrl_status),
574 rd_reg_word(&reg->isp.ictrl),
575 rd_reg_word(&reg->isp.istatus));
577 } else {
578 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
581 return rval;
585 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
586 uint32_t risc_code_size)
588 int rval;
589 struct qla_hw_data *ha = vha->hw;
590 mbx_cmd_t mc;
591 mbx_cmd_t *mcp = &mc;
593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
594 "Entered %s.\n", __func__);
596 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
597 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
598 mcp->mb[8] = MSW(risc_addr);
599 mcp->out_mb = MBX_8|MBX_0;
600 } else {
601 mcp->mb[0] = MBC_LOAD_RISC_RAM;
602 mcp->out_mb = MBX_0;
604 mcp->mb[1] = LSW(risc_addr);
605 mcp->mb[2] = MSW(req_dma);
606 mcp->mb[3] = LSW(req_dma);
607 mcp->mb[6] = MSW(MSD(req_dma));
608 mcp->mb[7] = LSW(MSD(req_dma));
609 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
610 if (IS_FWI2_CAPABLE(ha)) {
611 mcp->mb[4] = MSW(risc_code_size);
612 mcp->mb[5] = LSW(risc_code_size);
613 mcp->out_mb |= MBX_5|MBX_4;
614 } else {
615 mcp->mb[4] = LSW(risc_code_size);
616 mcp->out_mb |= MBX_4;
619 mcp->in_mb = MBX_1|MBX_0;
620 mcp->tov = MBX_TOV_SECONDS;
621 mcp->flags = 0;
622 rval = qla2x00_mailbox_command(vha, mcp);
624 if (rval != QLA_SUCCESS) {
625 ql_dbg(ql_dbg_mbx, vha, 0x1023,
626 "Failed=%x mb[0]=%x mb[1]=%x.\n",
627 rval, mcp->mb[0], mcp->mb[1]);
628 } else {
629 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
630 "Done %s.\n", __func__);
633 return rval;
636 #define NVME_ENABLE_FLAG BIT_3
639 * qla2x00_execute_fw
640 * Start adapter firmware.
642 * Input:
643 * ha = adapter block pointer.
644 * TARGET_QUEUE_LOCK must be released.
645 * ADAPTER_STATE_LOCK must be released.
647 * Returns:
648 * qla2x00 local function return status code.
650 * Context:
651 * Kernel context.
654 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
656 int rval;
657 struct qla_hw_data *ha = vha->hw;
658 mbx_cmd_t mc;
659 mbx_cmd_t *mcp = &mc;
660 u8 semaphore = 0;
661 #define EXE_FW_FORCE_SEMAPHORE BIT_7
662 u8 retry = 3;
664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
665 "Entered %s.\n", __func__);
667 again:
668 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
669 mcp->out_mb = MBX_0;
670 mcp->in_mb = MBX_0;
671 if (IS_FWI2_CAPABLE(ha)) {
672 mcp->mb[1] = MSW(risc_addr);
673 mcp->mb[2] = LSW(risc_addr);
674 mcp->mb[3] = 0;
675 mcp->mb[4] = 0;
676 mcp->mb[11] = 0;
678 /* Enable BPM? */
679 if (ha->flags.lr_detected) {
680 mcp->mb[4] = BIT_0;
681 if (IS_BPM_RANGE_CAPABLE(ha))
682 mcp->mb[4] |=
683 ha->lr_distance << LR_DIST_FW_POS;
686 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
687 mcp->mb[4] |= NVME_ENABLE_FLAG;
689 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
690 struct nvram_81xx *nv = ha->nvram;
691 /* set minimum speed if specified in nvram */
692 if (nv->min_supported_speed >= 2 &&
693 nv->min_supported_speed <= 5) {
694 mcp->mb[4] |= BIT_4;
695 mcp->mb[11] |= nv->min_supported_speed & 0xF;
696 mcp->out_mb |= MBX_11;
697 mcp->in_mb |= BIT_5;
698 vha->min_supported_speed =
699 nv->min_supported_speed;
703 if (ha->flags.exlogins_enabled)
704 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
706 if (ha->flags.exchoffld_enabled)
707 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
709 if (semaphore)
710 mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE;
712 mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
713 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
714 } else {
715 mcp->mb[1] = LSW(risc_addr);
716 mcp->out_mb |= MBX_1;
717 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
718 mcp->mb[2] = 0;
719 mcp->out_mb |= MBX_2;
723 mcp->tov = MBX_TOV_SECONDS;
724 mcp->flags = 0;
725 rval = qla2x00_mailbox_command(vha, mcp);
727 if (rval != QLA_SUCCESS) {
728 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
729 mcp->mb[1] == 0x27 && retry) {
730 semaphore = 1;
731 retry--;
732 ql_dbg(ql_dbg_async, vha, 0x1026,
733 "Exe FW: force semaphore.\n");
734 goto again;
737 ql_dbg(ql_dbg_mbx, vha, 0x1026,
738 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
739 return rval;
742 if (!IS_FWI2_CAPABLE(ha))
743 goto done;
745 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
746 ql_dbg(ql_dbg_mbx, vha, 0x119a,
747 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
748 ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]);
749 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
750 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
751 ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n",
752 ha->max_supported_speed == 0 ? "16Gps" :
753 ha->max_supported_speed == 1 ? "32Gps" :
754 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
755 if (vha->min_supported_speed) {
756 ha->min_supported_speed = mcp->mb[5] &
757 (BIT_0 | BIT_1 | BIT_2);
758 ql_dbg(ql_dbg_mbx, vha, 0x119c,
759 "min_supported_speed=%s.\n",
760 ha->min_supported_speed == 6 ? "64Gps" :
761 ha->min_supported_speed == 5 ? "32Gps" :
762 ha->min_supported_speed == 4 ? "16Gps" :
763 ha->min_supported_speed == 3 ? "8Gps" :
764 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
768 done:
769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
770 "Done %s.\n", __func__);
772 return rval;
776 * qla_get_exlogin_status
777 * Get extended login status
778 * uses the memory offload control/status Mailbox
780 * Input:
781 * ha: adapter state pointer.
782 * fwopt: firmware options
784 * Returns:
785 * qla2x00 local function status
787 * Context:
788 * Kernel context.
790 #define FETCH_XLOGINS_STAT 0x8
792 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
793 uint16_t *ex_logins_cnt)
795 int rval;
796 mbx_cmd_t mc;
797 mbx_cmd_t *mcp = &mc;
799 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
800 "Entered %s\n", __func__);
802 memset(mcp->mb, 0 , sizeof(mcp->mb));
803 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
804 mcp->mb[1] = FETCH_XLOGINS_STAT;
805 mcp->out_mb = MBX_1|MBX_0;
806 mcp->in_mb = MBX_10|MBX_4|MBX_0;
807 mcp->tov = MBX_TOV_SECONDS;
808 mcp->flags = 0;
810 rval = qla2x00_mailbox_command(vha, mcp);
811 if (rval != QLA_SUCCESS) {
812 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
813 } else {
814 *buf_sz = mcp->mb[4];
815 *ex_logins_cnt = mcp->mb[10];
817 ql_log(ql_log_info, vha, 0x1190,
818 "buffer size 0x%x, exchange login count=%d\n",
819 mcp->mb[4], mcp->mb[10]);
821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
822 "Done %s.\n", __func__);
825 return rval;
829 * qla_set_exlogin_mem_cfg
830 * set extended login memory configuration
831 * Mbx needs to be issues before init_cb is set
833 * Input:
834 * ha: adapter state pointer.
835 * buffer: buffer pointer
836 * phys_addr: physical address of buffer
837 * size: size of buffer
838 * TARGET_QUEUE_LOCK must be released
839 * ADAPTER_STATE_LOCK must be release
841 * Returns:
842 * qla2x00 local funxtion status code.
844 * Context:
845 * Kernel context.
847 #define CONFIG_XLOGINS_MEM 0x9
849 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
851 int rval;
852 mbx_cmd_t mc;
853 mbx_cmd_t *mcp = &mc;
854 struct qla_hw_data *ha = vha->hw;
856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
857 "Entered %s.\n", __func__);
859 memset(mcp->mb, 0 , sizeof(mcp->mb));
860 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
861 mcp->mb[1] = CONFIG_XLOGINS_MEM;
862 mcp->mb[2] = MSW(phys_addr);
863 mcp->mb[3] = LSW(phys_addr);
864 mcp->mb[6] = MSW(MSD(phys_addr));
865 mcp->mb[7] = LSW(MSD(phys_addr));
866 mcp->mb[8] = MSW(ha->exlogin_size);
867 mcp->mb[9] = LSW(ha->exlogin_size);
868 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
869 mcp->in_mb = MBX_11|MBX_0;
870 mcp->tov = MBX_TOV_SECONDS;
871 mcp->flags = 0;
872 rval = qla2x00_mailbox_command(vha, mcp);
873 if (rval != QLA_SUCCESS) {
874 ql_dbg(ql_dbg_mbx, vha, 0x111b,
875 "EXlogin Failed=%x. MB0=%x MB11=%x\n",
876 rval, mcp->mb[0], mcp->mb[11]);
877 } else {
878 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
879 "Done %s.\n", __func__);
882 return rval;
886 * qla_get_exchoffld_status
887 * Get exchange offload status
888 * uses the memory offload control/status Mailbox
890 * Input:
891 * ha: adapter state pointer.
892 * fwopt: firmware options
894 * Returns:
895 * qla2x00 local function status
897 * Context:
898 * Kernel context.
900 #define FETCH_XCHOFFLD_STAT 0x2
902 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
903 uint16_t *ex_logins_cnt)
905 int rval;
906 mbx_cmd_t mc;
907 mbx_cmd_t *mcp = &mc;
909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
910 "Entered %s\n", __func__);
912 memset(mcp->mb, 0 , sizeof(mcp->mb));
913 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
914 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
915 mcp->out_mb = MBX_1|MBX_0;
916 mcp->in_mb = MBX_10|MBX_4|MBX_0;
917 mcp->tov = MBX_TOV_SECONDS;
918 mcp->flags = 0;
920 rval = qla2x00_mailbox_command(vha, mcp);
921 if (rval != QLA_SUCCESS) {
922 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
923 } else {
924 *buf_sz = mcp->mb[4];
925 *ex_logins_cnt = mcp->mb[10];
927 ql_log(ql_log_info, vha, 0x118e,
928 "buffer size 0x%x, exchange offload count=%d\n",
929 mcp->mb[4], mcp->mb[10]);
931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
932 "Done %s.\n", __func__);
935 return rval;
939 * qla_set_exchoffld_mem_cfg
940 * Set exchange offload memory configuration
941 * Mbx needs to be issues before init_cb is set
943 * Input:
944 * ha: adapter state pointer.
945 * buffer: buffer pointer
946 * phys_addr: physical address of buffer
947 * size: size of buffer
948 * TARGET_QUEUE_LOCK must be released
949 * ADAPTER_STATE_LOCK must be release
951 * Returns:
952 * qla2x00 local funxtion status code.
954 * Context:
955 * Kernel context.
957 #define CONFIG_XCHOFFLD_MEM 0x3
959 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
961 int rval;
962 mbx_cmd_t mc;
963 mbx_cmd_t *mcp = &mc;
964 struct qla_hw_data *ha = vha->hw;
966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
967 "Entered %s.\n", __func__);
969 memset(mcp->mb, 0 , sizeof(mcp->mb));
970 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
971 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
972 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
973 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
974 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
975 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
976 mcp->mb[8] = MSW(ha->exchoffld_size);
977 mcp->mb[9] = LSW(ha->exchoffld_size);
978 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
979 mcp->in_mb = MBX_11|MBX_0;
980 mcp->tov = MBX_TOV_SECONDS;
981 mcp->flags = 0;
982 rval = qla2x00_mailbox_command(vha, mcp);
983 if (rval != QLA_SUCCESS) {
984 /*EMPTY*/
985 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
986 } else {
987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
988 "Done %s.\n", __func__);
991 return rval;
995 * qla2x00_get_fw_version
996 * Get firmware version.
998 * Input:
999 * ha: adapter state pointer.
1000 * major: pointer for major number.
1001 * minor: pointer for minor number.
1002 * subminor: pointer for subminor number.
1004 * Returns:
1005 * qla2x00 local function return status code.
1007 * Context:
1008 * Kernel context.
1011 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1013 int rval;
1014 mbx_cmd_t mc;
1015 mbx_cmd_t *mcp = &mc;
1016 struct qla_hw_data *ha = vha->hw;
1018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1019 "Entered %s.\n", __func__);
1021 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1022 mcp->out_mb = MBX_0;
1023 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1024 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1025 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1026 if (IS_FWI2_CAPABLE(ha))
1027 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1028 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1029 mcp->in_mb |=
1030 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1031 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
1033 mcp->flags = 0;
1034 mcp->tov = MBX_TOV_SECONDS;
1035 rval = qla2x00_mailbox_command(vha, mcp);
1036 if (rval != QLA_SUCCESS)
1037 goto failed;
1039 /* Return mailbox data. */
1040 ha->fw_major_version = mcp->mb[1];
1041 ha->fw_minor_version = mcp->mb[2];
1042 ha->fw_subminor_version = mcp->mb[3];
1043 ha->fw_attributes = mcp->mb[6];
1044 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1045 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1046 else
1047 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1049 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1050 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1051 ha->mpi_version[1] = mcp->mb[11] >> 8;
1052 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1053 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1054 ha->phy_version[0] = mcp->mb[8] & 0xff;
1055 ha->phy_version[1] = mcp->mb[9] >> 8;
1056 ha->phy_version[2] = mcp->mb[9] & 0xff;
1059 if (IS_FWI2_CAPABLE(ha)) {
1060 ha->fw_attributes_h = mcp->mb[15];
1061 ha->fw_attributes_ext[0] = mcp->mb[16];
1062 ha->fw_attributes_ext[1] = mcp->mb[17];
1063 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1064 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1065 __func__, mcp->mb[15], mcp->mb[6]);
1066 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1067 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1068 __func__, mcp->mb[17], mcp->mb[16]);
1070 if (ha->fw_attributes_h & 0x4)
1071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1072 "%s: Firmware supports Extended Login 0x%x\n",
1073 __func__, ha->fw_attributes_h);
1075 if (ha->fw_attributes_h & 0x8)
1076 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1077 "%s: Firmware supports Exchange Offload 0x%x\n",
1078 __func__, ha->fw_attributes_h);
1081 * FW supports nvme and driver load parameter requested nvme.
1082 * BIT 26 of fw_attributes indicates NVMe support.
1084 if ((ha->fw_attributes_h &
1085 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1086 ql2xnvmeenable) {
1087 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1088 vha->flags.nvme_first_burst = 1;
1090 vha->flags.nvme_enabled = 1;
1091 ql_log(ql_log_info, vha, 0xd302,
1092 "%s: FC-NVMe is Enabled (0x%x)\n",
1093 __func__, ha->fw_attributes_h);
1096 /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
1097 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1098 ql_log(ql_log_info, vha, 0xd302,
1099 "Firmware supports NVMe2 0x%x\n",
1100 ha->fw_attributes_ext[0]);
1101 vha->flags.nvme2_enabled = 1;
1105 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1106 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1107 ha->serdes_version[1] = mcp->mb[8] >> 8;
1108 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1109 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1110 ha->mpi_version[1] = mcp->mb[11] >> 8;
1111 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1112 ha->pep_version[0] = mcp->mb[13] & 0xff;
1113 ha->pep_version[1] = mcp->mb[14] >> 8;
1114 ha->pep_version[2] = mcp->mb[14] & 0xff;
1115 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1116 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1117 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1118 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1119 if (IS_QLA28XX(ha)) {
1120 if (mcp->mb[16] & BIT_10)
1121 ha->flags.secure_fw = 1;
1123 ql_log(ql_log_info, vha, 0xffff,
1124 "Secure Flash Update in FW: %s\n",
1125 (ha->flags.secure_fw) ? "Supported" :
1126 "Not Supported");
1129 if (ha->flags.scm_supported_a &&
1130 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1131 ha->flags.scm_supported_f = 1;
1132 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1134 ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
1135 (ha->flags.scm_supported_f) ? "Supported" :
1136 "Not Supported");
1138 if (vha->flags.nvme2_enabled) {
1139 /* set BIT_15 of special feature control block for SLER */
1140 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1141 /* set BIT_14 of special feature control block for PI CTRL*/
1142 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
1146 failed:
1147 if (rval != QLA_SUCCESS) {
1148 /*EMPTY*/
1149 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1150 } else {
1151 /*EMPTY*/
1152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1153 "Done %s.\n", __func__);
1155 return rval;
1159 * qla2x00_get_fw_options
1160 * Set firmware options.
1162 * Input:
1163 * ha = adapter block pointer.
1164 * fwopt = pointer for firmware options.
1166 * Returns:
1167 * qla2x00 local function return status code.
1169 * Context:
1170 * Kernel context.
1173 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1175 int rval;
1176 mbx_cmd_t mc;
1177 mbx_cmd_t *mcp = &mc;
1179 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1180 "Entered %s.\n", __func__);
1182 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1183 mcp->out_mb = MBX_0;
1184 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1185 mcp->tov = MBX_TOV_SECONDS;
1186 mcp->flags = 0;
1187 rval = qla2x00_mailbox_command(vha, mcp);
1189 if (rval != QLA_SUCCESS) {
1190 /*EMPTY*/
1191 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1192 } else {
1193 fwopts[0] = mcp->mb[0];
1194 fwopts[1] = mcp->mb[1];
1195 fwopts[2] = mcp->mb[2];
1196 fwopts[3] = mcp->mb[3];
1198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1199 "Done %s.\n", __func__);
1202 return rval;
1207 * qla2x00_set_fw_options
1208 * Set firmware options.
1210 * Input:
1211 * ha = adapter block pointer.
1212 * fwopt = pointer for firmware options.
1214 * Returns:
1215 * qla2x00 local function return status code.
1217 * Context:
1218 * Kernel context.
1221 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1223 int rval;
1224 mbx_cmd_t mc;
1225 mbx_cmd_t *mcp = &mc;
1227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1228 "Entered %s.\n", __func__);
1230 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1231 mcp->mb[1] = fwopts[1];
1232 mcp->mb[2] = fwopts[2];
1233 mcp->mb[3] = fwopts[3];
1234 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1235 mcp->in_mb = MBX_0;
1236 if (IS_FWI2_CAPABLE(vha->hw)) {
1237 mcp->in_mb |= MBX_1;
1238 mcp->mb[10] = fwopts[10];
1239 mcp->out_mb |= MBX_10;
1240 } else {
1241 mcp->mb[10] = fwopts[10];
1242 mcp->mb[11] = fwopts[11];
1243 mcp->mb[12] = 0; /* Undocumented, but used */
1244 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1246 mcp->tov = MBX_TOV_SECONDS;
1247 mcp->flags = 0;
1248 rval = qla2x00_mailbox_command(vha, mcp);
1250 fwopts[0] = mcp->mb[0];
1252 if (rval != QLA_SUCCESS) {
1253 /*EMPTY*/
1254 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1255 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1256 } else {
1257 /*EMPTY*/
1258 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1259 "Done %s.\n", __func__);
1262 return rval;
1266 * qla2x00_mbx_reg_test
1267 * Mailbox register wrap test.
1269 * Input:
1270 * ha = adapter block pointer.
1271 * TARGET_QUEUE_LOCK must be released.
1272 * ADAPTER_STATE_LOCK must be released.
1274 * Returns:
1275 * qla2x00 local function return status code.
1277 * Context:
1278 * Kernel context.
1281 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1283 int rval;
1284 mbx_cmd_t mc;
1285 mbx_cmd_t *mcp = &mc;
1287 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1288 "Entered %s.\n", __func__);
1290 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1291 mcp->mb[1] = 0xAAAA;
1292 mcp->mb[2] = 0x5555;
1293 mcp->mb[3] = 0xAA55;
1294 mcp->mb[4] = 0x55AA;
1295 mcp->mb[5] = 0xA5A5;
1296 mcp->mb[6] = 0x5A5A;
1297 mcp->mb[7] = 0x2525;
1298 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1299 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1300 mcp->tov = MBX_TOV_SECONDS;
1301 mcp->flags = 0;
1302 rval = qla2x00_mailbox_command(vha, mcp);
1304 if (rval == QLA_SUCCESS) {
1305 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1306 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1307 rval = QLA_FUNCTION_FAILED;
1308 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1309 mcp->mb[7] != 0x2525)
1310 rval = QLA_FUNCTION_FAILED;
1313 if (rval != QLA_SUCCESS) {
1314 /*EMPTY*/
1315 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1316 } else {
1317 /*EMPTY*/
1318 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1319 "Done %s.\n", __func__);
1322 return rval;
1326 * qla2x00_verify_checksum
1327 * Verify firmware checksum.
1329 * Input:
1330 * ha = adapter block pointer.
1331 * TARGET_QUEUE_LOCK must be released.
1332 * ADAPTER_STATE_LOCK must be released.
1334 * Returns:
1335 * qla2x00 local function return status code.
1337 * Context:
1338 * Kernel context.
1341 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1343 int rval;
1344 mbx_cmd_t mc;
1345 mbx_cmd_t *mcp = &mc;
1347 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1348 "Entered %s.\n", __func__);
1350 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1351 mcp->out_mb = MBX_0;
1352 mcp->in_mb = MBX_0;
1353 if (IS_FWI2_CAPABLE(vha->hw)) {
1354 mcp->mb[1] = MSW(risc_addr);
1355 mcp->mb[2] = LSW(risc_addr);
1356 mcp->out_mb |= MBX_2|MBX_1;
1357 mcp->in_mb |= MBX_2|MBX_1;
1358 } else {
1359 mcp->mb[1] = LSW(risc_addr);
1360 mcp->out_mb |= MBX_1;
1361 mcp->in_mb |= MBX_1;
1364 mcp->tov = MBX_TOV_SECONDS;
1365 mcp->flags = 0;
1366 rval = qla2x00_mailbox_command(vha, mcp);
1368 if (rval != QLA_SUCCESS) {
1369 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1370 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1371 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1372 } else {
1373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1374 "Done %s.\n", __func__);
1377 return rval;
1381 * qla2x00_issue_iocb
1382 * Issue IOCB using mailbox command
1384 * Input:
1385 * ha = adapter state pointer.
1386 * buffer = buffer pointer.
1387 * phys_addr = physical address of buffer.
1388 * size = size of buffer.
1389 * TARGET_QUEUE_LOCK must be released.
1390 * ADAPTER_STATE_LOCK must be released.
1392 * Returns:
1393 * qla2x00 local function return status code.
1395 * Context:
1396 * Kernel context.
1399 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1400 dma_addr_t phys_addr, size_t size, uint32_t tov)
1402 int rval;
1403 mbx_cmd_t mc;
1404 mbx_cmd_t *mcp = &mc;
1406 if (!vha->hw->flags.fw_started)
1407 return QLA_INVALID_COMMAND;
1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1410 "Entered %s.\n", __func__);
1412 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1413 mcp->mb[1] = 0;
1414 mcp->mb[2] = MSW(LSD(phys_addr));
1415 mcp->mb[3] = LSW(LSD(phys_addr));
1416 mcp->mb[6] = MSW(MSD(phys_addr));
1417 mcp->mb[7] = LSW(MSD(phys_addr));
1418 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1419 mcp->in_mb = MBX_1|MBX_0;
1420 mcp->tov = tov;
1421 mcp->flags = 0;
1422 rval = qla2x00_mailbox_command(vha, mcp);
1424 if (rval != QLA_SUCCESS) {
1425 /*EMPTY*/
1426 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1427 } else {
1428 sts_entry_t *sts_entry = buffer;
1430 /* Mask reserved bits. */
1431 sts_entry->entry_status &=
1432 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1433 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1434 "Done %s (status=%x).\n", __func__,
1435 sts_entry->entry_status);
1438 return rval;
1442 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1443 size_t size)
1445 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1446 MBX_TOV_SECONDS);
1450 * qla2x00_abort_command
1451 * Abort command aborts a specified IOCB.
1453 * Input:
1454 * ha = adapter block pointer.
1455 * sp = SB structure pointer.
1457 * Returns:
1458 * qla2x00 local function return status code.
1460 * Context:
1461 * Kernel context.
1464 qla2x00_abort_command(srb_t *sp)
1466 unsigned long flags = 0;
1467 int rval;
1468 uint32_t handle = 0;
1469 mbx_cmd_t mc;
1470 mbx_cmd_t *mcp = &mc;
1471 fc_port_t *fcport = sp->fcport;
1472 scsi_qla_host_t *vha = fcport->vha;
1473 struct qla_hw_data *ha = vha->hw;
1474 struct req_que *req;
1475 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1477 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1478 "Entered %s.\n", __func__);
1480 if (sp->qpair)
1481 req = sp->qpair->req;
1482 else
1483 req = vha->req;
1485 spin_lock_irqsave(&ha->hardware_lock, flags);
1486 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1487 if (req->outstanding_cmds[handle] == sp)
1488 break;
1490 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1492 if (handle == req->num_outstanding_cmds) {
1493 /* command not found */
1494 return QLA_FUNCTION_FAILED;
1497 mcp->mb[0] = MBC_ABORT_COMMAND;
1498 if (HAS_EXTENDED_IDS(ha))
1499 mcp->mb[1] = fcport->loop_id;
1500 else
1501 mcp->mb[1] = fcport->loop_id << 8;
1502 mcp->mb[2] = (uint16_t)handle;
1503 mcp->mb[3] = (uint16_t)(handle >> 16);
1504 mcp->mb[6] = (uint16_t)cmd->device->lun;
1505 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1506 mcp->in_mb = MBX_0;
1507 mcp->tov = MBX_TOV_SECONDS;
1508 mcp->flags = 0;
1509 rval = qla2x00_mailbox_command(vha, mcp);
1511 if (rval != QLA_SUCCESS) {
1512 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1513 } else {
1514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1515 "Done %s.\n", __func__);
1518 return rval;
1522 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1524 int rval, rval2;
1525 mbx_cmd_t mc;
1526 mbx_cmd_t *mcp = &mc;
1527 scsi_qla_host_t *vha;
1529 vha = fcport->vha;
1531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1532 "Entered %s.\n", __func__);
1534 mcp->mb[0] = MBC_ABORT_TARGET;
1535 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1536 if (HAS_EXTENDED_IDS(vha->hw)) {
1537 mcp->mb[1] = fcport->loop_id;
1538 mcp->mb[10] = 0;
1539 mcp->out_mb |= MBX_10;
1540 } else {
1541 mcp->mb[1] = fcport->loop_id << 8;
1543 mcp->mb[2] = vha->hw->loop_reset_delay;
1544 mcp->mb[9] = vha->vp_idx;
1546 mcp->in_mb = MBX_0;
1547 mcp->tov = MBX_TOV_SECONDS;
1548 mcp->flags = 0;
1549 rval = qla2x00_mailbox_command(vha, mcp);
1550 if (rval != QLA_SUCCESS) {
1551 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1552 "Failed=%x.\n", rval);
1555 /* Issue marker IOCB. */
1556 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1557 MK_SYNC_ID);
1558 if (rval2 != QLA_SUCCESS) {
1559 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1560 "Failed to issue marker IOCB (%x).\n", rval2);
1561 } else {
1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1563 "Done %s.\n", __func__);
1566 return rval;
1570 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1572 int rval, rval2;
1573 mbx_cmd_t mc;
1574 mbx_cmd_t *mcp = &mc;
1575 scsi_qla_host_t *vha;
1577 vha = fcport->vha;
1579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1580 "Entered %s.\n", __func__);
1582 mcp->mb[0] = MBC_LUN_RESET;
1583 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1584 if (HAS_EXTENDED_IDS(vha->hw))
1585 mcp->mb[1] = fcport->loop_id;
1586 else
1587 mcp->mb[1] = fcport->loop_id << 8;
1588 mcp->mb[2] = (u32)l;
1589 mcp->mb[3] = 0;
1590 mcp->mb[9] = vha->vp_idx;
1592 mcp->in_mb = MBX_0;
1593 mcp->tov = MBX_TOV_SECONDS;
1594 mcp->flags = 0;
1595 rval = qla2x00_mailbox_command(vha, mcp);
1596 if (rval != QLA_SUCCESS) {
1597 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1600 /* Issue marker IOCB. */
1601 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1602 MK_SYNC_ID_LUN);
1603 if (rval2 != QLA_SUCCESS) {
1604 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1605 "Failed to issue marker IOCB (%x).\n", rval2);
1606 } else {
1607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1608 "Done %s.\n", __func__);
1611 return rval;
1615 * qla2x00_get_adapter_id
1616 * Get adapter ID and topology.
1618 * Input:
1619 * ha = adapter block pointer.
1620 * id = pointer for loop ID.
1621 * al_pa = pointer for AL_PA.
1622 * area = pointer for area.
1623 * domain = pointer for domain.
1624 * top = pointer for topology.
1625 * TARGET_QUEUE_LOCK must be released.
1626 * ADAPTER_STATE_LOCK must be released.
1628 * Returns:
1629 * qla2x00 local function return status code.
1631 * Context:
1632 * Kernel context.
1635 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1636 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1638 int rval;
1639 mbx_cmd_t mc;
1640 mbx_cmd_t *mcp = &mc;
1642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1643 "Entered %s.\n", __func__);
1645 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1646 mcp->mb[9] = vha->vp_idx;
1647 mcp->out_mb = MBX_9|MBX_0;
1648 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1649 if (IS_CNA_CAPABLE(vha->hw))
1650 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1651 if (IS_FWI2_CAPABLE(vha->hw))
1652 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1653 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1654 mcp->in_mb |= MBX_15;
1655 mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
1658 mcp->tov = MBX_TOV_SECONDS;
1659 mcp->flags = 0;
1660 rval = qla2x00_mailbox_command(vha, mcp);
1661 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1662 rval = QLA_COMMAND_ERROR;
1663 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1664 rval = QLA_INVALID_COMMAND;
1666 /* Return data. */
1667 *id = mcp->mb[1];
1668 *al_pa = LSB(mcp->mb[2]);
1669 *area = MSB(mcp->mb[2]);
1670 *domain = LSB(mcp->mb[3]);
1671 *top = mcp->mb[6];
1672 *sw_cap = mcp->mb[7];
1674 if (rval != QLA_SUCCESS) {
1675 /*EMPTY*/
1676 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1677 } else {
1678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1679 "Done %s.\n", __func__);
1681 if (IS_CNA_CAPABLE(vha->hw)) {
1682 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1683 vha->fcoe_fcf_idx = mcp->mb[10];
1684 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1685 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1686 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1687 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1688 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1689 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1691 /* If FA-WWN supported */
1692 if (IS_FAWWN_CAPABLE(vha->hw)) {
1693 if (mcp->mb[7] & BIT_14) {
1694 vha->port_name[0] = MSB(mcp->mb[16]);
1695 vha->port_name[1] = LSB(mcp->mb[16]);
1696 vha->port_name[2] = MSB(mcp->mb[17]);
1697 vha->port_name[3] = LSB(mcp->mb[17]);
1698 vha->port_name[4] = MSB(mcp->mb[18]);
1699 vha->port_name[5] = LSB(mcp->mb[18]);
1700 vha->port_name[6] = MSB(mcp->mb[19]);
1701 vha->port_name[7] = LSB(mcp->mb[19]);
1702 fc_host_port_name(vha->host) =
1703 wwn_to_u64(vha->port_name);
1704 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1705 "FA-WWN acquired %016llx\n",
1706 wwn_to_u64(vha->port_name));
1710 if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
1711 vha->bbcr = mcp->mb[15];
1712 if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) {
1713 ql_log(ql_log_info, vha, 0x11a4,
1714 "SCM: EDC ELS completed, flags 0x%x\n",
1715 mcp->mb[21]);
1717 if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) {
1718 vha->hw->flags.scm_enabled = 1;
1719 vha->scm_fabric_connection_flags |=
1720 SCM_FLAG_RDF_COMPLETED;
1721 ql_log(ql_log_info, vha, 0x11a5,
1722 "SCM: RDF ELS completed, flags 0x%x\n",
1723 mcp->mb[23]);
1728 return rval;
1732 * qla2x00_get_retry_cnt
1733 * Get current firmware login retry count and delay.
1735 * Input:
1736 * ha = adapter block pointer.
1737 * retry_cnt = pointer to login retry count.
1738 * tov = pointer to login timeout value.
1740 * Returns:
1741 * qla2x00 local function return status code.
1743 * Context:
1744 * Kernel context.
1747 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1748 uint16_t *r_a_tov)
1750 int rval;
1751 uint16_t ratov;
1752 mbx_cmd_t mc;
1753 mbx_cmd_t *mcp = &mc;
1755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1756 "Entered %s.\n", __func__);
1758 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1759 mcp->out_mb = MBX_0;
1760 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1761 mcp->tov = MBX_TOV_SECONDS;
1762 mcp->flags = 0;
1763 rval = qla2x00_mailbox_command(vha, mcp);
1765 if (rval != QLA_SUCCESS) {
1766 /*EMPTY*/
1767 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1768 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1769 } else {
1770 /* Convert returned data and check our values. */
1771 *r_a_tov = mcp->mb[3] / 2;
1772 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1773 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1774 /* Update to the larger values */
1775 *retry_cnt = (uint8_t)mcp->mb[1];
1776 *tov = ratov;
1779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1780 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1783 return rval;
1787 * qla2x00_init_firmware
1788 * Initialize adapter firmware.
1790 * Input:
1791 * ha = adapter block pointer.
1792 * dptr = Initialization control block pointer.
1793 * size = size of initialization control block.
1794 * TARGET_QUEUE_LOCK must be released.
1795 * ADAPTER_STATE_LOCK must be released.
1797 * Returns:
1798 * qla2x00 local function return status code.
1800 * Context:
1801 * Kernel context.
1804 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1806 int rval;
1807 mbx_cmd_t mc;
1808 mbx_cmd_t *mcp = &mc;
1809 struct qla_hw_data *ha = vha->hw;
1811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1812 "Entered %s.\n", __func__);
1814 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1815 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1816 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1818 if (ha->flags.npiv_supported)
1819 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1820 else
1821 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1823 mcp->mb[1] = 0;
1824 mcp->mb[2] = MSW(ha->init_cb_dma);
1825 mcp->mb[3] = LSW(ha->init_cb_dma);
1826 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1827 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1828 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1829 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1830 mcp->mb[1] = BIT_0;
1831 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1832 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1833 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1834 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1835 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1836 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1839 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1840 mcp->mb[1] |= BIT_1;
1841 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1842 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1843 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1844 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1845 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1846 mcp->out_mb |= MBX_19|MBX_18|MBX_17|MBX_16|MBX_15;
1849 /* 1 and 2 should normally be captured. */
1850 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1851 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1852 /* mb3 is additional info about the installed SFP. */
1853 mcp->in_mb |= MBX_3;
1854 mcp->buf_size = size;
1855 mcp->flags = MBX_DMA_OUT;
1856 mcp->tov = MBX_TOV_SECONDS;
1857 rval = qla2x00_mailbox_command(vha, mcp);
1859 if (rval != QLA_SUCCESS) {
1860 /*EMPTY*/
1861 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1862 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n",
1863 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1864 if (ha->init_cb) {
1865 ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n");
1866 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1867 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1869 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1870 ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n");
1871 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha,
1872 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1874 } else {
1875 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1876 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1877 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1878 "Invalid SFP/Validation Failed\n");
1880 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1881 "Done %s.\n", __func__);
1884 return rval;
1889 * qla2x00_get_port_database
1890 * Issue normal/enhanced get port database mailbox command
1891 * and copy device name as necessary.
1893 * Input:
1894 * ha = adapter state pointer.
1895 * dev = structure pointer.
1896 * opt = enhanced cmd option byte.
1898 * Returns:
1899 * qla2x00 local function return status code.
1901 * Context:
1902 * Kernel context.
1905 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1907 int rval;
1908 mbx_cmd_t mc;
1909 mbx_cmd_t *mcp = &mc;
1910 port_database_t *pd;
1911 struct port_database_24xx *pd24;
1912 dma_addr_t pd_dma;
1913 struct qla_hw_data *ha = vha->hw;
1915 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1916 "Entered %s.\n", __func__);
1918 pd24 = NULL;
1919 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1920 if (pd == NULL) {
1921 ql_log(ql_log_warn, vha, 0x1050,
1922 "Failed to allocate port database structure.\n");
1923 fcport->query = 0;
1924 return QLA_MEMORY_ALLOC_FAILED;
1927 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1928 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1929 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1930 mcp->mb[2] = MSW(pd_dma);
1931 mcp->mb[3] = LSW(pd_dma);
1932 mcp->mb[6] = MSW(MSD(pd_dma));
1933 mcp->mb[7] = LSW(MSD(pd_dma));
1934 mcp->mb[9] = vha->vp_idx;
1935 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1936 mcp->in_mb = MBX_0;
1937 if (IS_FWI2_CAPABLE(ha)) {
1938 mcp->mb[1] = fcport->loop_id;
1939 mcp->mb[10] = opt;
1940 mcp->out_mb |= MBX_10|MBX_1;
1941 mcp->in_mb |= MBX_1;
1942 } else if (HAS_EXTENDED_IDS(ha)) {
1943 mcp->mb[1] = fcport->loop_id;
1944 mcp->mb[10] = opt;
1945 mcp->out_mb |= MBX_10|MBX_1;
1946 } else {
1947 mcp->mb[1] = fcport->loop_id << 8 | opt;
1948 mcp->out_mb |= MBX_1;
1950 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1951 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1952 mcp->flags = MBX_DMA_IN;
1953 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1954 rval = qla2x00_mailbox_command(vha, mcp);
1955 if (rval != QLA_SUCCESS)
1956 goto gpd_error_out;
1958 if (IS_FWI2_CAPABLE(ha)) {
1959 uint64_t zero = 0;
1960 u8 current_login_state, last_login_state;
1962 pd24 = (struct port_database_24xx *) pd;
1964 /* Check for logged in state. */
1965 if (NVME_TARGET(ha, fcport)) {
1966 current_login_state = pd24->current_login_state >> 4;
1967 last_login_state = pd24->last_login_state >> 4;
1968 } else {
1969 current_login_state = pd24->current_login_state & 0xf;
1970 last_login_state = pd24->last_login_state & 0xf;
1972 fcport->current_login_state = pd24->current_login_state;
1973 fcport->last_login_state = pd24->last_login_state;
1975 /* Check for logged in state. */
1976 if (current_login_state != PDS_PRLI_COMPLETE &&
1977 last_login_state != PDS_PRLI_COMPLETE) {
1978 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1979 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1980 current_login_state, last_login_state,
1981 fcport->loop_id);
1982 rval = QLA_FUNCTION_FAILED;
1984 if (!fcport->query)
1985 goto gpd_error_out;
1988 if (fcport->loop_id == FC_NO_LOOP_ID ||
1989 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1990 memcmp(fcport->port_name, pd24->port_name, 8))) {
1991 /* We lost the device mid way. */
1992 rval = QLA_NOT_LOGGED_IN;
1993 goto gpd_error_out;
1996 /* Names are little-endian. */
1997 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1998 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
2000 /* Get port_id of device. */
2001 fcport->d_id.b.domain = pd24->port_id[0];
2002 fcport->d_id.b.area = pd24->port_id[1];
2003 fcport->d_id.b.al_pa = pd24->port_id[2];
2004 fcport->d_id.b.rsvd_1 = 0;
2006 /* If not target must be initiator or unknown type. */
2007 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
2008 fcport->port_type = FCT_INITIATOR;
2009 else
2010 fcport->port_type = FCT_TARGET;
2012 /* Passback COS information. */
2013 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
2014 FC_COS_CLASS2 : FC_COS_CLASS3;
2016 if (pd24->prli_svc_param_word_3[0] & BIT_7)
2017 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
2018 } else {
2019 uint64_t zero = 0;
2021 /* Check for logged in state. */
2022 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
2023 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
2024 ql_dbg(ql_dbg_mbx, vha, 0x100a,
2025 "Unable to verify login-state (%x/%x) - "
2026 "portid=%02x%02x%02x.\n", pd->master_state,
2027 pd->slave_state, fcport->d_id.b.domain,
2028 fcport->d_id.b.area, fcport->d_id.b.al_pa);
2029 rval = QLA_FUNCTION_FAILED;
2030 goto gpd_error_out;
2033 if (fcport->loop_id == FC_NO_LOOP_ID ||
2034 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
2035 memcmp(fcport->port_name, pd->port_name, 8))) {
2036 /* We lost the device mid way. */
2037 rval = QLA_NOT_LOGGED_IN;
2038 goto gpd_error_out;
2041 /* Names are little-endian. */
2042 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
2043 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
2045 /* Get port_id of device. */
2046 fcport->d_id.b.domain = pd->port_id[0];
2047 fcport->d_id.b.area = pd->port_id[3];
2048 fcport->d_id.b.al_pa = pd->port_id[2];
2049 fcport->d_id.b.rsvd_1 = 0;
2051 /* If not target must be initiator or unknown type. */
2052 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2053 fcport->port_type = FCT_INITIATOR;
2054 else
2055 fcport->port_type = FCT_TARGET;
2057 /* Passback COS information. */
2058 fcport->supported_classes = (pd->options & BIT_4) ?
2059 FC_COS_CLASS2 : FC_COS_CLASS3;
2062 gpd_error_out:
2063 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2064 fcport->query = 0;
2066 if (rval != QLA_SUCCESS) {
2067 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2068 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2069 mcp->mb[0], mcp->mb[1]);
2070 } else {
2071 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2072 "Done %s.\n", __func__);
2075 return rval;
2079 qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle,
2080 struct port_database_24xx *pdb)
2082 mbx_cmd_t mc;
2083 mbx_cmd_t *mcp = &mc;
2084 dma_addr_t pdb_dma;
2085 int rval;
2087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1115,
2088 "Entered %s.\n", __func__);
2090 memset(pdb, 0, sizeof(*pdb));
2092 pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb,
2093 sizeof(*pdb), DMA_FROM_DEVICE);
2094 if (!pdb_dma) {
2095 ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n");
2096 return QLA_MEMORY_ALLOC_FAILED;
2099 mcp->mb[0] = MBC_GET_PORT_DATABASE;
2100 mcp->mb[1] = nport_handle;
2101 mcp->mb[2] = MSW(LSD(pdb_dma));
2102 mcp->mb[3] = LSW(LSD(pdb_dma));
2103 mcp->mb[6] = MSW(MSD(pdb_dma));
2104 mcp->mb[7] = LSW(MSD(pdb_dma));
2105 mcp->mb[9] = 0;
2106 mcp->mb[10] = 0;
2107 mcp->out_mb = MBX_10|MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2108 mcp->in_mb = MBX_1|MBX_0;
2109 mcp->buf_size = sizeof(*pdb);
2110 mcp->flags = MBX_DMA_IN;
2111 mcp->tov = vha->hw->login_timeout * 2;
2112 rval = qla2x00_mailbox_command(vha, mcp);
2114 if (rval != QLA_SUCCESS) {
2115 ql_dbg(ql_dbg_mbx, vha, 0x111a,
2116 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2117 rval, mcp->mb[0], mcp->mb[1]);
2118 } else {
2119 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111b,
2120 "Done %s.\n", __func__);
2123 dma_unmap_single(&vha->hw->pdev->dev, pdb_dma,
2124 sizeof(*pdb), DMA_FROM_DEVICE);
2126 return rval;
2130 * qla2x00_get_firmware_state
2131 * Get adapter firmware state.
2133 * Input:
2134 * ha = adapter block pointer.
2135 * dptr = pointer for firmware state.
2136 * TARGET_QUEUE_LOCK must be released.
2137 * ADAPTER_STATE_LOCK must be released.
2139 * Returns:
2140 * qla2x00 local function return status code.
2142 * Context:
2143 * Kernel context.
2146 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2148 int rval;
2149 mbx_cmd_t mc;
2150 mbx_cmd_t *mcp = &mc;
2151 struct qla_hw_data *ha = vha->hw;
2153 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2154 "Entered %s.\n", __func__);
2156 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2157 mcp->out_mb = MBX_0;
2158 if (IS_FWI2_CAPABLE(vha->hw))
2159 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2160 else
2161 mcp->in_mb = MBX_1|MBX_0;
2162 mcp->tov = MBX_TOV_SECONDS;
2163 mcp->flags = 0;
2164 rval = qla2x00_mailbox_command(vha, mcp);
2166 /* Return firmware states. */
2167 states[0] = mcp->mb[1];
2168 if (IS_FWI2_CAPABLE(vha->hw)) {
2169 states[1] = mcp->mb[2];
2170 states[2] = mcp->mb[3]; /* SFP info */
2171 states[3] = mcp->mb[4];
2172 states[4] = mcp->mb[5];
2173 states[5] = mcp->mb[6]; /* DPORT status */
2176 if (rval != QLA_SUCCESS) {
2177 /*EMPTY*/
2178 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2179 } else {
2180 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2181 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2182 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2183 "Invalid SFP/Validation Failed\n");
2185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2186 "Done %s.\n", __func__);
2189 return rval;
2193 * qla2x00_get_port_name
2194 * Issue get port name mailbox command.
2195 * Returned name is in big endian format.
2197 * Input:
2198 * ha = adapter block pointer.
2199 * loop_id = loop ID of device.
2200 * name = pointer for name.
2201 * TARGET_QUEUE_LOCK must be released.
2202 * ADAPTER_STATE_LOCK must be released.
2204 * Returns:
2205 * qla2x00 local function return status code.
2207 * Context:
2208 * Kernel context.
2211 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2212 uint8_t opt)
2214 int rval;
2215 mbx_cmd_t mc;
2216 mbx_cmd_t *mcp = &mc;
2218 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2219 "Entered %s.\n", __func__);
2221 mcp->mb[0] = MBC_GET_PORT_NAME;
2222 mcp->mb[9] = vha->vp_idx;
2223 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2224 if (HAS_EXTENDED_IDS(vha->hw)) {
2225 mcp->mb[1] = loop_id;
2226 mcp->mb[10] = opt;
2227 mcp->out_mb |= MBX_10;
2228 } else {
2229 mcp->mb[1] = loop_id << 8 | opt;
2232 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2233 mcp->tov = MBX_TOV_SECONDS;
2234 mcp->flags = 0;
2235 rval = qla2x00_mailbox_command(vha, mcp);
2237 if (rval != QLA_SUCCESS) {
2238 /*EMPTY*/
2239 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2240 } else {
2241 if (name != NULL) {
2242 /* This function returns name in big endian. */
2243 name[0] = MSB(mcp->mb[2]);
2244 name[1] = LSB(mcp->mb[2]);
2245 name[2] = MSB(mcp->mb[3]);
2246 name[3] = LSB(mcp->mb[3]);
2247 name[4] = MSB(mcp->mb[6]);
2248 name[5] = LSB(mcp->mb[6]);
2249 name[6] = MSB(mcp->mb[7]);
2250 name[7] = LSB(mcp->mb[7]);
2253 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2254 "Done %s.\n", __func__);
2257 return rval;
2261 * qla24xx_link_initialization
2262 * Issue link initialization mailbox command.
2264 * Input:
2265 * ha = adapter block pointer.
2266 * TARGET_QUEUE_LOCK must be released.
2267 * ADAPTER_STATE_LOCK must be released.
2269 * Returns:
2270 * qla2x00 local function return status code.
2272 * Context:
2273 * Kernel context.
2276 qla24xx_link_initialize(scsi_qla_host_t *vha)
2278 int rval;
2279 mbx_cmd_t mc;
2280 mbx_cmd_t *mcp = &mc;
2282 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2283 "Entered %s.\n", __func__);
2285 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2286 return QLA_FUNCTION_FAILED;
2288 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2289 mcp->mb[1] = BIT_4;
2290 if (vha->hw->operating_mode == LOOP)
2291 mcp->mb[1] |= BIT_6;
2292 else
2293 mcp->mb[1] |= BIT_5;
2294 mcp->mb[2] = 0;
2295 mcp->mb[3] = 0;
2296 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2297 mcp->in_mb = MBX_0;
2298 mcp->tov = MBX_TOV_SECONDS;
2299 mcp->flags = 0;
2300 rval = qla2x00_mailbox_command(vha, mcp);
2302 if (rval != QLA_SUCCESS) {
2303 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2304 } else {
2305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2306 "Done %s.\n", __func__);
2309 return rval;
2313 * qla2x00_lip_reset
2314 * Issue LIP reset mailbox command.
2316 * Input:
2317 * ha = adapter block pointer.
2318 * TARGET_QUEUE_LOCK must be released.
2319 * ADAPTER_STATE_LOCK must be released.
2321 * Returns:
2322 * qla2x00 local function return status code.
2324 * Context:
2325 * Kernel context.
2328 qla2x00_lip_reset(scsi_qla_host_t *vha)
2330 int rval;
2331 mbx_cmd_t mc;
2332 mbx_cmd_t *mcp = &mc;
2334 ql_dbg(ql_dbg_disc, vha, 0x105a,
2335 "Entered %s.\n", __func__);
2337 if (IS_CNA_CAPABLE(vha->hw)) {
2338 /* Logout across all FCFs. */
2339 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2340 mcp->mb[1] = BIT_1;
2341 mcp->mb[2] = 0;
2342 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2343 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2344 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2345 mcp->mb[1] = BIT_4;
2346 mcp->mb[2] = 0;
2347 mcp->mb[3] = vha->hw->loop_reset_delay;
2348 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2349 } else {
2350 mcp->mb[0] = MBC_LIP_RESET;
2351 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2352 if (HAS_EXTENDED_IDS(vha->hw)) {
2353 mcp->mb[1] = 0x00ff;
2354 mcp->mb[10] = 0;
2355 mcp->out_mb |= MBX_10;
2356 } else {
2357 mcp->mb[1] = 0xff00;
2359 mcp->mb[2] = vha->hw->loop_reset_delay;
2360 mcp->mb[3] = 0;
2362 mcp->in_mb = MBX_0;
2363 mcp->tov = MBX_TOV_SECONDS;
2364 mcp->flags = 0;
2365 rval = qla2x00_mailbox_command(vha, mcp);
2367 if (rval != QLA_SUCCESS) {
2368 /*EMPTY*/
2369 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2370 } else {
2371 /*EMPTY*/
2372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2373 "Done %s.\n", __func__);
2376 return rval;
2380 * qla2x00_send_sns
2381 * Send SNS command.
2383 * Input:
2384 * ha = adapter block pointer.
2385 * sns = pointer for command.
2386 * cmd_size = command size.
2387 * buf_size = response/command size.
2388 * TARGET_QUEUE_LOCK must be released.
2389 * ADAPTER_STATE_LOCK must be released.
2391 * Returns:
2392 * qla2x00 local function return status code.
2394 * Context:
2395 * Kernel context.
2398 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2399 uint16_t cmd_size, size_t buf_size)
2401 int rval;
2402 mbx_cmd_t mc;
2403 mbx_cmd_t *mcp = &mc;
2405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2406 "Entered %s.\n", __func__);
2408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2409 "Retry cnt=%d ratov=%d total tov=%d.\n",
2410 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2412 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2413 mcp->mb[1] = cmd_size;
2414 mcp->mb[2] = MSW(sns_phys_address);
2415 mcp->mb[3] = LSW(sns_phys_address);
2416 mcp->mb[6] = MSW(MSD(sns_phys_address));
2417 mcp->mb[7] = LSW(MSD(sns_phys_address));
2418 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2419 mcp->in_mb = MBX_0|MBX_1;
2420 mcp->buf_size = buf_size;
2421 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2422 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2423 rval = qla2x00_mailbox_command(vha, mcp);
2425 if (rval != QLA_SUCCESS) {
2426 /*EMPTY*/
2427 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2428 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2429 rval, mcp->mb[0], mcp->mb[1]);
2430 } else {
2431 /*EMPTY*/
2432 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2433 "Done %s.\n", __func__);
2436 return rval;
2440 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2441 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2443 int rval;
2445 struct logio_entry_24xx *lg;
2446 dma_addr_t lg_dma;
2447 uint32_t iop[2];
2448 struct qla_hw_data *ha = vha->hw;
2449 struct req_que *req;
2451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2452 "Entered %s.\n", __func__);
2454 if (vha->vp_idx && vha->qpair)
2455 req = vha->qpair->req;
2456 else
2457 req = ha->req_q_map[0];
2459 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2460 if (lg == NULL) {
2461 ql_log(ql_log_warn, vha, 0x1062,
2462 "Failed to allocate login IOCB.\n");
2463 return QLA_MEMORY_ALLOC_FAILED;
2466 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2467 lg->entry_count = 1;
2468 lg->handle = make_handle(req->id, lg->handle);
2469 lg->nport_handle = cpu_to_le16(loop_id);
2470 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2471 if (opt & BIT_0)
2472 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2473 if (opt & BIT_1)
2474 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2475 lg->port_id[0] = al_pa;
2476 lg->port_id[1] = area;
2477 lg->port_id[2] = domain;
2478 lg->vp_index = vha->vp_idx;
2479 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2480 (ha->r_a_tov / 10 * 2) + 2);
2481 if (rval != QLA_SUCCESS) {
2482 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2483 "Failed to issue login IOCB (%x).\n", rval);
2484 } else if (lg->entry_status != 0) {
2485 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2486 "Failed to complete IOCB -- error status (%x).\n",
2487 lg->entry_status);
2488 rval = QLA_FUNCTION_FAILED;
2489 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2490 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2491 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2493 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2494 "Failed to complete IOCB -- completion status (%x) "
2495 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2496 iop[0], iop[1]);
2498 switch (iop[0]) {
2499 case LSC_SCODE_PORTID_USED:
2500 mb[0] = MBS_PORT_ID_USED;
2501 mb[1] = LSW(iop[1]);
2502 break;
2503 case LSC_SCODE_NPORT_USED:
2504 mb[0] = MBS_LOOP_ID_USED;
2505 break;
2506 case LSC_SCODE_NOLINK:
2507 case LSC_SCODE_NOIOCB:
2508 case LSC_SCODE_NOXCB:
2509 case LSC_SCODE_CMD_FAILED:
2510 case LSC_SCODE_NOFABRIC:
2511 case LSC_SCODE_FW_NOT_READY:
2512 case LSC_SCODE_NOT_LOGGED_IN:
2513 case LSC_SCODE_NOPCB:
2514 case LSC_SCODE_ELS_REJECT:
2515 case LSC_SCODE_CMD_PARAM_ERR:
2516 case LSC_SCODE_NONPORT:
2517 case LSC_SCODE_LOGGED_IN:
2518 case LSC_SCODE_NOFLOGI_ACC:
2519 default:
2520 mb[0] = MBS_COMMAND_ERROR;
2521 break;
2523 } else {
2524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2525 "Done %s.\n", __func__);
2527 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2529 mb[0] = MBS_COMMAND_COMPLETE;
2530 mb[1] = 0;
2531 if (iop[0] & BIT_4) {
2532 if (iop[0] & BIT_8)
2533 mb[1] |= BIT_1;
2534 } else
2535 mb[1] = BIT_0;
2537 /* Passback COS information. */
2538 mb[10] = 0;
2539 if (lg->io_parameter[7] || lg->io_parameter[8])
2540 mb[10] |= BIT_0; /* Class 2. */
2541 if (lg->io_parameter[9] || lg->io_parameter[10])
2542 mb[10] |= BIT_1; /* Class 3. */
2543 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2544 mb[10] |= BIT_7; /* Confirmed Completion
2545 * Allowed
2549 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2551 return rval;
2555 * qla2x00_login_fabric
2556 * Issue login fabric port mailbox command.
2558 * Input:
2559 * ha = adapter block pointer.
2560 * loop_id = device loop ID.
2561 * domain = device domain.
2562 * area = device area.
2563 * al_pa = device AL_PA.
2564 * status = pointer for return status.
2565 * opt = command options.
2566 * TARGET_QUEUE_LOCK must be released.
2567 * ADAPTER_STATE_LOCK must be released.
2569 * Returns:
2570 * qla2x00 local function return status code.
2572 * Context:
2573 * Kernel context.
2576 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2577 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2579 int rval;
2580 mbx_cmd_t mc;
2581 mbx_cmd_t *mcp = &mc;
2582 struct qla_hw_data *ha = vha->hw;
2584 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2585 "Entered %s.\n", __func__);
2587 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2588 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2589 if (HAS_EXTENDED_IDS(ha)) {
2590 mcp->mb[1] = loop_id;
2591 mcp->mb[10] = opt;
2592 mcp->out_mb |= MBX_10;
2593 } else {
2594 mcp->mb[1] = (loop_id << 8) | opt;
2596 mcp->mb[2] = domain;
2597 mcp->mb[3] = area << 8 | al_pa;
2599 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2600 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2601 mcp->flags = 0;
2602 rval = qla2x00_mailbox_command(vha, mcp);
2604 /* Return mailbox statuses. */
2605 if (mb != NULL) {
2606 mb[0] = mcp->mb[0];
2607 mb[1] = mcp->mb[1];
2608 mb[2] = mcp->mb[2];
2609 mb[6] = mcp->mb[6];
2610 mb[7] = mcp->mb[7];
2611 /* COS retrieved from Get-Port-Database mailbox command. */
2612 mb[10] = 0;
2615 if (rval != QLA_SUCCESS) {
2616 /* RLU tmp code: need to change main mailbox_command function to
2617 * return ok even when the mailbox completion value is not
2618 * SUCCESS. The caller needs to be responsible to interpret
2619 * the return values of this mailbox command if we're not
2620 * to change too much of the existing code.
2622 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2623 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2624 mcp->mb[0] == 0x4006)
2625 rval = QLA_SUCCESS;
2627 /*EMPTY*/
2628 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2629 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2630 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2631 } else {
2632 /*EMPTY*/
2633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2634 "Done %s.\n", __func__);
2637 return rval;
2641 * qla2x00_login_local_device
2642 * Issue login loop port mailbox command.
2644 * Input:
2645 * ha = adapter block pointer.
2646 * loop_id = device loop ID.
2647 * opt = command options.
2649 * Returns:
2650 * Return status code.
2652 * Context:
2653 * Kernel context.
2657 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2658 uint16_t *mb_ret, uint8_t opt)
2660 int rval;
2661 mbx_cmd_t mc;
2662 mbx_cmd_t *mcp = &mc;
2663 struct qla_hw_data *ha = vha->hw;
2665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2666 "Entered %s.\n", __func__);
2668 if (IS_FWI2_CAPABLE(ha))
2669 return qla24xx_login_fabric(vha, fcport->loop_id,
2670 fcport->d_id.b.domain, fcport->d_id.b.area,
2671 fcport->d_id.b.al_pa, mb_ret, opt);
2673 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2674 if (HAS_EXTENDED_IDS(ha))
2675 mcp->mb[1] = fcport->loop_id;
2676 else
2677 mcp->mb[1] = fcport->loop_id << 8;
2678 mcp->mb[2] = opt;
2679 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2680 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2681 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2682 mcp->flags = 0;
2683 rval = qla2x00_mailbox_command(vha, mcp);
2685 /* Return mailbox statuses. */
2686 if (mb_ret != NULL) {
2687 mb_ret[0] = mcp->mb[0];
2688 mb_ret[1] = mcp->mb[1];
2689 mb_ret[6] = mcp->mb[6];
2690 mb_ret[7] = mcp->mb[7];
2693 if (rval != QLA_SUCCESS) {
2694 /* AV tmp code: need to change main mailbox_command function to
2695 * return ok even when the mailbox completion value is not
2696 * SUCCESS. The caller needs to be responsible to interpret
2697 * the return values of this mailbox command if we're not
2698 * to change too much of the existing code.
2700 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2701 rval = QLA_SUCCESS;
2703 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2704 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2705 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2706 } else {
2707 /*EMPTY*/
2708 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2709 "Done %s.\n", __func__);
2712 return (rval);
2716 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2717 uint8_t area, uint8_t al_pa)
2719 int rval;
2720 struct logio_entry_24xx *lg;
2721 dma_addr_t lg_dma;
2722 struct qla_hw_data *ha = vha->hw;
2723 struct req_que *req;
2725 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2726 "Entered %s.\n", __func__);
2728 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2729 if (lg == NULL) {
2730 ql_log(ql_log_warn, vha, 0x106e,
2731 "Failed to allocate logout IOCB.\n");
2732 return QLA_MEMORY_ALLOC_FAILED;
2735 req = vha->req;
2736 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2737 lg->entry_count = 1;
2738 lg->handle = make_handle(req->id, lg->handle);
2739 lg->nport_handle = cpu_to_le16(loop_id);
2740 lg->control_flags =
2741 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2742 LCF_FREE_NPORT);
2743 lg->port_id[0] = al_pa;
2744 lg->port_id[1] = area;
2745 lg->port_id[2] = domain;
2746 lg->vp_index = vha->vp_idx;
2747 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2748 (ha->r_a_tov / 10 * 2) + 2);
2749 if (rval != QLA_SUCCESS) {
2750 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2751 "Failed to issue logout IOCB (%x).\n", rval);
2752 } else if (lg->entry_status != 0) {
2753 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2754 "Failed to complete IOCB -- error status (%x).\n",
2755 lg->entry_status);
2756 rval = QLA_FUNCTION_FAILED;
2757 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2758 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2759 "Failed to complete IOCB -- completion status (%x) "
2760 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2761 le32_to_cpu(lg->io_parameter[0]),
2762 le32_to_cpu(lg->io_parameter[1]));
2763 } else {
2764 /*EMPTY*/
2765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2766 "Done %s.\n", __func__);
2769 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2771 return rval;
2775 * qla2x00_fabric_logout
2776 * Issue logout fabric port mailbox command.
2778 * Input:
2779 * ha = adapter block pointer.
2780 * loop_id = device loop ID.
2781 * TARGET_QUEUE_LOCK must be released.
2782 * ADAPTER_STATE_LOCK must be released.
2784 * Returns:
2785 * qla2x00 local function return status code.
2787 * Context:
2788 * Kernel context.
2791 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2792 uint8_t area, uint8_t al_pa)
2794 int rval;
2795 mbx_cmd_t mc;
2796 mbx_cmd_t *mcp = &mc;
2798 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2799 "Entered %s.\n", __func__);
2801 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2802 mcp->out_mb = MBX_1|MBX_0;
2803 if (HAS_EXTENDED_IDS(vha->hw)) {
2804 mcp->mb[1] = loop_id;
2805 mcp->mb[10] = 0;
2806 mcp->out_mb |= MBX_10;
2807 } else {
2808 mcp->mb[1] = loop_id << 8;
2811 mcp->in_mb = MBX_1|MBX_0;
2812 mcp->tov = MBX_TOV_SECONDS;
2813 mcp->flags = 0;
2814 rval = qla2x00_mailbox_command(vha, mcp);
2816 if (rval != QLA_SUCCESS) {
2817 /*EMPTY*/
2818 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2819 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2820 } else {
2821 /*EMPTY*/
2822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2823 "Done %s.\n", __func__);
2826 return rval;
2830 * qla2x00_full_login_lip
2831 * Issue full login LIP mailbox command.
2833 * Input:
2834 * ha = adapter block pointer.
2835 * TARGET_QUEUE_LOCK must be released.
2836 * ADAPTER_STATE_LOCK must be released.
2838 * Returns:
2839 * qla2x00 local function return status code.
2841 * Context:
2842 * Kernel context.
2845 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2847 int rval;
2848 mbx_cmd_t mc;
2849 mbx_cmd_t *mcp = &mc;
2851 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2852 "Entered %s.\n", __func__);
2854 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2855 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2856 mcp->mb[2] = 0;
2857 mcp->mb[3] = 0;
2858 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2859 mcp->in_mb = MBX_0;
2860 mcp->tov = MBX_TOV_SECONDS;
2861 mcp->flags = 0;
2862 rval = qla2x00_mailbox_command(vha, mcp);
2864 if (rval != QLA_SUCCESS) {
2865 /*EMPTY*/
2866 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2867 } else {
2868 /*EMPTY*/
2869 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2870 "Done %s.\n", __func__);
2873 return rval;
2877 * qla2x00_get_id_list
2879 * Input:
2880 * ha = adapter block pointer.
2882 * Returns:
2883 * qla2x00 local function return status code.
2885 * Context:
2886 * Kernel context.
2889 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2890 uint16_t *entries)
2892 int rval;
2893 mbx_cmd_t mc;
2894 mbx_cmd_t *mcp = &mc;
2896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2897 "Entered %s.\n", __func__);
2899 if (id_list == NULL)
2900 return QLA_FUNCTION_FAILED;
2902 mcp->mb[0] = MBC_GET_ID_LIST;
2903 mcp->out_mb = MBX_0;
2904 if (IS_FWI2_CAPABLE(vha->hw)) {
2905 mcp->mb[2] = MSW(id_list_dma);
2906 mcp->mb[3] = LSW(id_list_dma);
2907 mcp->mb[6] = MSW(MSD(id_list_dma));
2908 mcp->mb[7] = LSW(MSD(id_list_dma));
2909 mcp->mb[8] = 0;
2910 mcp->mb[9] = vha->vp_idx;
2911 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2912 } else {
2913 mcp->mb[1] = MSW(id_list_dma);
2914 mcp->mb[2] = LSW(id_list_dma);
2915 mcp->mb[3] = MSW(MSD(id_list_dma));
2916 mcp->mb[6] = LSW(MSD(id_list_dma));
2917 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2919 mcp->in_mb = MBX_1|MBX_0;
2920 mcp->tov = MBX_TOV_SECONDS;
2921 mcp->flags = 0;
2922 rval = qla2x00_mailbox_command(vha, mcp);
2924 if (rval != QLA_SUCCESS) {
2925 /*EMPTY*/
2926 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2927 } else {
2928 *entries = mcp->mb[1];
2929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2930 "Done %s.\n", __func__);
2933 return rval;
2937 * qla2x00_get_resource_cnts
2938 * Get current firmware resource counts.
2940 * Input:
2941 * ha = adapter block pointer.
2943 * Returns:
2944 * qla2x00 local function return status code.
2946 * Context:
2947 * Kernel context.
2950 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2952 struct qla_hw_data *ha = vha->hw;
2953 int rval;
2954 mbx_cmd_t mc;
2955 mbx_cmd_t *mcp = &mc;
2957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2958 "Entered %s.\n", __func__);
2960 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2961 mcp->out_mb = MBX_0;
2962 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2963 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
2964 IS_QLA27XX(ha) || IS_QLA28XX(ha))
2965 mcp->in_mb |= MBX_12;
2966 mcp->tov = MBX_TOV_SECONDS;
2967 mcp->flags = 0;
2968 rval = qla2x00_mailbox_command(vha, mcp);
2970 if (rval != QLA_SUCCESS) {
2971 /*EMPTY*/
2972 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2973 "Failed mb[0]=%x.\n", mcp->mb[0]);
2974 } else {
2975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2976 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2977 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2978 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2979 mcp->mb[11], mcp->mb[12]);
2981 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2982 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2983 ha->cur_fw_xcb_count = mcp->mb[3];
2984 ha->orig_fw_xcb_count = mcp->mb[6];
2985 ha->cur_fw_iocb_count = mcp->mb[7];
2986 ha->orig_fw_iocb_count = mcp->mb[10];
2987 if (ha->flags.npiv_supported)
2988 ha->max_npiv_vports = mcp->mb[11];
2989 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2990 IS_QLA28XX(ha))
2991 ha->fw_max_fcf_count = mcp->mb[12];
2994 return (rval);
2998 * qla2x00_get_fcal_position_map
2999 * Get FCAL (LILP) position map using mailbox command
3001 * Input:
3002 * ha = adapter state pointer.
3003 * pos_map = buffer pointer (can be NULL).
3005 * Returns:
3006 * qla2x00 local function return status code.
3008 * Context:
3009 * Kernel context.
3012 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
3014 int rval;
3015 mbx_cmd_t mc;
3016 mbx_cmd_t *mcp = &mc;
3017 char *pmap;
3018 dma_addr_t pmap_dma;
3019 struct qla_hw_data *ha = vha->hw;
3021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
3022 "Entered %s.\n", __func__);
3024 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3025 if (pmap == NULL) {
3026 ql_log(ql_log_warn, vha, 0x1080,
3027 "Memory alloc failed.\n");
3028 return QLA_MEMORY_ALLOC_FAILED;
3031 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
3032 mcp->mb[2] = MSW(pmap_dma);
3033 mcp->mb[3] = LSW(pmap_dma);
3034 mcp->mb[6] = MSW(MSD(pmap_dma));
3035 mcp->mb[7] = LSW(MSD(pmap_dma));
3036 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3037 mcp->in_mb = MBX_1|MBX_0;
3038 mcp->buf_size = FCAL_MAP_SIZE;
3039 mcp->flags = MBX_DMA_IN;
3040 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3041 rval = qla2x00_mailbox_command(vha, mcp);
3043 if (rval == QLA_SUCCESS) {
3044 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
3045 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
3046 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
3047 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
3048 pmap, pmap[0] + 1);
3050 if (pos_map)
3051 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
3053 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3055 if (rval != QLA_SUCCESS) {
3056 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
3057 } else {
3058 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
3059 "Done %s.\n", __func__);
3062 return rval;
3066 * qla2x00_get_link_status
3068 * Input:
3069 * ha = adapter block pointer.
3070 * loop_id = device loop ID.
3071 * ret_buf = pointer to link status return buffer.
3073 * Returns:
3074 * 0 = success.
3075 * BIT_0 = mem alloc error.
3076 * BIT_1 = mailbox error.
3079 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
3080 struct link_statistics *stats, dma_addr_t stats_dma)
3082 int rval;
3083 mbx_cmd_t mc;
3084 mbx_cmd_t *mcp = &mc;
3085 uint32_t *iter = (uint32_t *)stats;
3086 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
3087 struct qla_hw_data *ha = vha->hw;
3089 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
3090 "Entered %s.\n", __func__);
3092 mcp->mb[0] = MBC_GET_LINK_STATUS;
3093 mcp->mb[2] = MSW(LSD(stats_dma));
3094 mcp->mb[3] = LSW(LSD(stats_dma));
3095 mcp->mb[6] = MSW(MSD(stats_dma));
3096 mcp->mb[7] = LSW(MSD(stats_dma));
3097 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3098 mcp->in_mb = MBX_0;
3099 if (IS_FWI2_CAPABLE(ha)) {
3100 mcp->mb[1] = loop_id;
3101 mcp->mb[4] = 0;
3102 mcp->mb[10] = 0;
3103 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3104 mcp->in_mb |= MBX_1;
3105 } else if (HAS_EXTENDED_IDS(ha)) {
3106 mcp->mb[1] = loop_id;
3107 mcp->mb[10] = 0;
3108 mcp->out_mb |= MBX_10|MBX_1;
3109 } else {
3110 mcp->mb[1] = loop_id << 8;
3111 mcp->out_mb |= MBX_1;
3113 mcp->tov = MBX_TOV_SECONDS;
3114 mcp->flags = IOCTL_CMD;
3115 rval = qla2x00_mailbox_command(vha, mcp);
3117 if (rval == QLA_SUCCESS) {
3118 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3119 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3120 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3121 rval = QLA_FUNCTION_FAILED;
3122 } else {
3123 /* Re-endianize - firmware data is le32. */
3124 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3125 "Done %s.\n", __func__);
3126 for ( ; dwords--; iter++)
3127 le32_to_cpus(iter);
3129 } else {
3130 /* Failed. */
3131 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3134 return rval;
3138 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3139 dma_addr_t stats_dma, uint16_t options)
3141 int rval;
3142 mbx_cmd_t mc;
3143 mbx_cmd_t *mcp = &mc;
3144 uint32_t *iter = (uint32_t *)stats;
3145 ushort dwords = sizeof(*stats)/sizeof(*iter);
3147 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3148 "Entered %s.\n", __func__);
3150 memset(&mc, 0, sizeof(mc));
3151 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3152 mc.mb[2] = MSW(LSD(stats_dma));
3153 mc.mb[3] = LSW(LSD(stats_dma));
3154 mc.mb[6] = MSW(MSD(stats_dma));
3155 mc.mb[7] = LSW(MSD(stats_dma));
3156 mc.mb[8] = dwords;
3157 mc.mb[9] = vha->vp_idx;
3158 mc.mb[10] = options;
3160 rval = qla24xx_send_mb_cmd(vha, &mc);
3162 if (rval == QLA_SUCCESS) {
3163 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3164 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3165 "Failed mb[0]=%x.\n", mcp->mb[0]);
3166 rval = QLA_FUNCTION_FAILED;
3167 } else {
3168 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3169 "Done %s.\n", __func__);
3170 /* Re-endianize - firmware data is le32. */
3171 for ( ; dwords--; iter++)
3172 le32_to_cpus(iter);
3174 } else {
3175 /* Failed. */
3176 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3179 return rval;
3183 qla24xx_abort_command(srb_t *sp)
3185 int rval;
3186 unsigned long flags = 0;
3188 struct abort_entry_24xx *abt;
3189 dma_addr_t abt_dma;
3190 uint32_t handle;
3191 fc_port_t *fcport = sp->fcport;
3192 struct scsi_qla_host *vha = fcport->vha;
3193 struct qla_hw_data *ha = vha->hw;
3194 struct req_que *req = vha->req;
3195 struct qla_qpair *qpair = sp->qpair;
3197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3198 "Entered %s.\n", __func__);
3200 if (sp->qpair)
3201 req = sp->qpair->req;
3202 else
3203 return QLA_FUNCTION_FAILED;
3205 if (ql2xasynctmfenable)
3206 return qla24xx_async_abort_command(sp);
3208 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3209 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3210 if (req->outstanding_cmds[handle] == sp)
3211 break;
3213 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3214 if (handle == req->num_outstanding_cmds) {
3215 /* Command not found. */
3216 return QLA_FUNCTION_FAILED;
3219 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3220 if (abt == NULL) {
3221 ql_log(ql_log_warn, vha, 0x108d,
3222 "Failed to allocate abort IOCB.\n");
3223 return QLA_MEMORY_ALLOC_FAILED;
3226 abt->entry_type = ABORT_IOCB_TYPE;
3227 abt->entry_count = 1;
3228 abt->handle = make_handle(req->id, abt->handle);
3229 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3230 abt->handle_to_abort = make_handle(req->id, handle);
3231 abt->port_id[0] = fcport->d_id.b.al_pa;
3232 abt->port_id[1] = fcport->d_id.b.area;
3233 abt->port_id[2] = fcport->d_id.b.domain;
3234 abt->vp_index = fcport->vha->vp_idx;
3236 abt->req_que_no = cpu_to_le16(req->id);
3238 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3239 if (rval != QLA_SUCCESS) {
3240 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3241 "Failed to issue IOCB (%x).\n", rval);
3242 } else if (abt->entry_status != 0) {
3243 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3244 "Failed to complete IOCB -- error status (%x).\n",
3245 abt->entry_status);
3246 rval = QLA_FUNCTION_FAILED;
3247 } else if (abt->nport_handle != cpu_to_le16(0)) {
3248 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3249 "Failed to complete IOCB -- completion status (%x).\n",
3250 le16_to_cpu(abt->nport_handle));
3251 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
3252 rval = QLA_FUNCTION_PARAMETER_ERROR;
3253 else
3254 rval = QLA_FUNCTION_FAILED;
3255 } else {
3256 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3257 "Done %s.\n", __func__);
3260 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3262 return rval;
3265 struct tsk_mgmt_cmd {
3266 union {
3267 struct tsk_mgmt_entry tsk;
3268 struct sts_entry_24xx sts;
3269 } p;
3272 static int
3273 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3274 uint64_t l, int tag)
3276 int rval, rval2;
3277 struct tsk_mgmt_cmd *tsk;
3278 struct sts_entry_24xx *sts;
3279 dma_addr_t tsk_dma;
3280 scsi_qla_host_t *vha;
3281 struct qla_hw_data *ha;
3282 struct req_que *req;
3283 struct qla_qpair *qpair;
3285 vha = fcport->vha;
3286 ha = vha->hw;
3287 req = vha->req;
3289 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3290 "Entered %s.\n", __func__);
3292 if (vha->vp_idx && vha->qpair) {
3293 /* NPIV port */
3294 qpair = vha->qpair;
3295 req = qpair->req;
3298 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3299 if (tsk == NULL) {
3300 ql_log(ql_log_warn, vha, 0x1093,
3301 "Failed to allocate task management IOCB.\n");
3302 return QLA_MEMORY_ALLOC_FAILED;
3305 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3306 tsk->p.tsk.entry_count = 1;
3307 tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle);
3308 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3309 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3310 tsk->p.tsk.control_flags = cpu_to_le32(type);
3311 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3312 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3313 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3314 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3315 if (type == TCF_LUN_RESET) {
3316 int_to_scsilun(l, &tsk->p.tsk.lun);
3317 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3318 sizeof(tsk->p.tsk.lun));
3321 sts = &tsk->p.sts;
3322 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3323 if (rval != QLA_SUCCESS) {
3324 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3325 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3326 } else if (sts->entry_status != 0) {
3327 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3328 "Failed to complete IOCB -- error status (%x).\n",
3329 sts->entry_status);
3330 rval = QLA_FUNCTION_FAILED;
3331 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3332 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3333 "Failed to complete IOCB -- completion status (%x).\n",
3334 le16_to_cpu(sts->comp_status));
3335 rval = QLA_FUNCTION_FAILED;
3336 } else if (le16_to_cpu(sts->scsi_status) &
3337 SS_RESPONSE_INFO_LEN_VALID) {
3338 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3340 "Ignoring inconsistent data length -- not enough "
3341 "response info (%d).\n",
3342 le32_to_cpu(sts->rsp_data_len));
3343 } else if (sts->data[3]) {
3344 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3345 "Failed to complete IOCB -- response (%x).\n",
3346 sts->data[3]);
3347 rval = QLA_FUNCTION_FAILED;
3351 /* Issue marker IOCB. */
3352 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3353 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
3354 if (rval2 != QLA_SUCCESS) {
3355 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3356 "Failed to issue marker IOCB (%x).\n", rval2);
3357 } else {
3358 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3359 "Done %s.\n", __func__);
3362 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3364 return rval;
3368 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3370 struct qla_hw_data *ha = fcport->vha->hw;
3372 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3373 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3375 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3379 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3381 struct qla_hw_data *ha = fcport->vha->hw;
3383 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3384 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3386 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3390 qla2x00_system_error(scsi_qla_host_t *vha)
3392 int rval;
3393 mbx_cmd_t mc;
3394 mbx_cmd_t *mcp = &mc;
3395 struct qla_hw_data *ha = vha->hw;
3397 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3398 return QLA_FUNCTION_FAILED;
3400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3401 "Entered %s.\n", __func__);
3403 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3404 mcp->out_mb = MBX_0;
3405 mcp->in_mb = MBX_0;
3406 mcp->tov = 5;
3407 mcp->flags = 0;
3408 rval = qla2x00_mailbox_command(vha, mcp);
3410 if (rval != QLA_SUCCESS) {
3411 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3412 } else {
3413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3414 "Done %s.\n", __func__);
3417 return rval;
3421 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3423 int rval;
3424 mbx_cmd_t mc;
3425 mbx_cmd_t *mcp = &mc;
3427 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3428 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3429 return QLA_FUNCTION_FAILED;
3431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3432 "Entered %s.\n", __func__);
3434 mcp->mb[0] = MBC_WRITE_SERDES;
3435 mcp->mb[1] = addr;
3436 if (IS_QLA2031(vha->hw))
3437 mcp->mb[2] = data & 0xff;
3438 else
3439 mcp->mb[2] = data;
3441 mcp->mb[3] = 0;
3442 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3443 mcp->in_mb = MBX_0;
3444 mcp->tov = MBX_TOV_SECONDS;
3445 mcp->flags = 0;
3446 rval = qla2x00_mailbox_command(vha, mcp);
3448 if (rval != QLA_SUCCESS) {
3449 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3450 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3451 } else {
3452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3453 "Done %s.\n", __func__);
3456 return rval;
3460 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3462 int rval;
3463 mbx_cmd_t mc;
3464 mbx_cmd_t *mcp = &mc;
3466 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3467 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
3468 return QLA_FUNCTION_FAILED;
3470 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3471 "Entered %s.\n", __func__);
3473 mcp->mb[0] = MBC_READ_SERDES;
3474 mcp->mb[1] = addr;
3475 mcp->mb[3] = 0;
3476 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3477 mcp->in_mb = MBX_1|MBX_0;
3478 mcp->tov = MBX_TOV_SECONDS;
3479 mcp->flags = 0;
3480 rval = qla2x00_mailbox_command(vha, mcp);
3482 if (IS_QLA2031(vha->hw))
3483 *data = mcp->mb[1] & 0xff;
3484 else
3485 *data = mcp->mb[1];
3487 if (rval != QLA_SUCCESS) {
3488 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3489 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3490 } else {
3491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3492 "Done %s.\n", __func__);
3495 return rval;
3499 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3501 int rval;
3502 mbx_cmd_t mc;
3503 mbx_cmd_t *mcp = &mc;
3505 if (!IS_QLA8044(vha->hw))
3506 return QLA_FUNCTION_FAILED;
3508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3509 "Entered %s.\n", __func__);
3511 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3512 mcp->mb[1] = HCS_WRITE_SERDES;
3513 mcp->mb[3] = LSW(addr);
3514 mcp->mb[4] = MSW(addr);
3515 mcp->mb[5] = LSW(data);
3516 mcp->mb[6] = MSW(data);
3517 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3518 mcp->in_mb = MBX_0;
3519 mcp->tov = MBX_TOV_SECONDS;
3520 mcp->flags = 0;
3521 rval = qla2x00_mailbox_command(vha, mcp);
3523 if (rval != QLA_SUCCESS) {
3524 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3525 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3526 } else {
3527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3528 "Done %s.\n", __func__);
3531 return rval;
3535 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3537 int rval;
3538 mbx_cmd_t mc;
3539 mbx_cmd_t *mcp = &mc;
3541 if (!IS_QLA8044(vha->hw))
3542 return QLA_FUNCTION_FAILED;
3544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3545 "Entered %s.\n", __func__);
3547 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3548 mcp->mb[1] = HCS_READ_SERDES;
3549 mcp->mb[3] = LSW(addr);
3550 mcp->mb[4] = MSW(addr);
3551 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3552 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3553 mcp->tov = MBX_TOV_SECONDS;
3554 mcp->flags = 0;
3555 rval = qla2x00_mailbox_command(vha, mcp);
3557 *data = mcp->mb[2] << 16 | mcp->mb[1];
3559 if (rval != QLA_SUCCESS) {
3560 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3561 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3562 } else {
3563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3564 "Done %s.\n", __func__);
3567 return rval;
3571 * qla2x00_set_serdes_params() -
3572 * @vha: HA context
3573 * @sw_em_1g: serial link options
3574 * @sw_em_2g: serial link options
3575 * @sw_em_4g: serial link options
3577 * Returns
3580 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3581 uint16_t sw_em_2g, uint16_t sw_em_4g)
3583 int rval;
3584 mbx_cmd_t mc;
3585 mbx_cmd_t *mcp = &mc;
3587 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3588 "Entered %s.\n", __func__);
3590 mcp->mb[0] = MBC_SERDES_PARAMS;
3591 mcp->mb[1] = BIT_0;
3592 mcp->mb[2] = sw_em_1g | BIT_15;
3593 mcp->mb[3] = sw_em_2g | BIT_15;
3594 mcp->mb[4] = sw_em_4g | BIT_15;
3595 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3596 mcp->in_mb = MBX_0;
3597 mcp->tov = MBX_TOV_SECONDS;
3598 mcp->flags = 0;
3599 rval = qla2x00_mailbox_command(vha, mcp);
3601 if (rval != QLA_SUCCESS) {
3602 /*EMPTY*/
3603 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3605 } else {
3606 /*EMPTY*/
3607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3608 "Done %s.\n", __func__);
3611 return rval;
3615 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3617 int rval;
3618 mbx_cmd_t mc;
3619 mbx_cmd_t *mcp = &mc;
3621 if (!IS_FWI2_CAPABLE(vha->hw))
3622 return QLA_FUNCTION_FAILED;
3624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3625 "Entered %s.\n", __func__);
3627 mcp->mb[0] = MBC_STOP_FIRMWARE;
3628 mcp->mb[1] = 0;
3629 mcp->out_mb = MBX_1|MBX_0;
3630 mcp->in_mb = MBX_0;
3631 mcp->tov = 5;
3632 mcp->flags = 0;
3633 rval = qla2x00_mailbox_command(vha, mcp);
3635 if (rval != QLA_SUCCESS) {
3636 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3637 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3638 rval = QLA_INVALID_COMMAND;
3639 } else {
3640 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3641 "Done %s.\n", __func__);
3644 return rval;
3648 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3649 uint16_t buffers)
3651 int rval;
3652 mbx_cmd_t mc;
3653 mbx_cmd_t *mcp = &mc;
3655 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3656 "Entered %s.\n", __func__);
3658 if (!IS_FWI2_CAPABLE(vha->hw))
3659 return QLA_FUNCTION_FAILED;
3661 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3662 return QLA_FUNCTION_FAILED;
3664 mcp->mb[0] = MBC_TRACE_CONTROL;
3665 mcp->mb[1] = TC_EFT_ENABLE;
3666 mcp->mb[2] = LSW(eft_dma);
3667 mcp->mb[3] = MSW(eft_dma);
3668 mcp->mb[4] = LSW(MSD(eft_dma));
3669 mcp->mb[5] = MSW(MSD(eft_dma));
3670 mcp->mb[6] = buffers;
3671 mcp->mb[7] = TC_AEN_DISABLE;
3672 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3673 mcp->in_mb = MBX_1|MBX_0;
3674 mcp->tov = MBX_TOV_SECONDS;
3675 mcp->flags = 0;
3676 rval = qla2x00_mailbox_command(vha, mcp);
3677 if (rval != QLA_SUCCESS) {
3678 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3679 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3680 rval, mcp->mb[0], mcp->mb[1]);
3681 } else {
3682 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3683 "Done %s.\n", __func__);
3686 return rval;
3690 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3692 int rval;
3693 mbx_cmd_t mc;
3694 mbx_cmd_t *mcp = &mc;
3696 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3697 "Entered %s.\n", __func__);
3699 if (!IS_FWI2_CAPABLE(vha->hw))
3700 return QLA_FUNCTION_FAILED;
3702 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3703 return QLA_FUNCTION_FAILED;
3705 mcp->mb[0] = MBC_TRACE_CONTROL;
3706 mcp->mb[1] = TC_EFT_DISABLE;
3707 mcp->out_mb = MBX_1|MBX_0;
3708 mcp->in_mb = MBX_1|MBX_0;
3709 mcp->tov = MBX_TOV_SECONDS;
3710 mcp->flags = 0;
3711 rval = qla2x00_mailbox_command(vha, mcp);
3712 if (rval != QLA_SUCCESS) {
3713 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3714 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3715 rval, mcp->mb[0], mcp->mb[1]);
3716 } else {
3717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3718 "Done %s.\n", __func__);
3721 return rval;
3725 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3726 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3728 int rval;
3729 mbx_cmd_t mc;
3730 mbx_cmd_t *mcp = &mc;
3732 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3733 "Entered %s.\n", __func__);
3735 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3736 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
3737 !IS_QLA28XX(vha->hw))
3738 return QLA_FUNCTION_FAILED;
3740 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3741 return QLA_FUNCTION_FAILED;
3743 mcp->mb[0] = MBC_TRACE_CONTROL;
3744 mcp->mb[1] = TC_FCE_ENABLE;
3745 mcp->mb[2] = LSW(fce_dma);
3746 mcp->mb[3] = MSW(fce_dma);
3747 mcp->mb[4] = LSW(MSD(fce_dma));
3748 mcp->mb[5] = MSW(MSD(fce_dma));
3749 mcp->mb[6] = buffers;
3750 mcp->mb[7] = TC_AEN_DISABLE;
3751 mcp->mb[8] = 0;
3752 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3753 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3754 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3755 MBX_1|MBX_0;
3756 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3757 mcp->tov = MBX_TOV_SECONDS;
3758 mcp->flags = 0;
3759 rval = qla2x00_mailbox_command(vha, mcp);
3760 if (rval != QLA_SUCCESS) {
3761 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3762 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3763 rval, mcp->mb[0], mcp->mb[1]);
3764 } else {
3765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3766 "Done %s.\n", __func__);
3768 if (mb)
3769 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3770 if (dwords)
3771 *dwords = buffers;
3774 return rval;
3778 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3780 int rval;
3781 mbx_cmd_t mc;
3782 mbx_cmd_t *mcp = &mc;
3784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3785 "Entered %s.\n", __func__);
3787 if (!IS_FWI2_CAPABLE(vha->hw))
3788 return QLA_FUNCTION_FAILED;
3790 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3791 return QLA_FUNCTION_FAILED;
3793 mcp->mb[0] = MBC_TRACE_CONTROL;
3794 mcp->mb[1] = TC_FCE_DISABLE;
3795 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3796 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3797 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3798 MBX_1|MBX_0;
3799 mcp->tov = MBX_TOV_SECONDS;
3800 mcp->flags = 0;
3801 rval = qla2x00_mailbox_command(vha, mcp);
3802 if (rval != QLA_SUCCESS) {
3803 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3804 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3805 rval, mcp->mb[0], mcp->mb[1]);
3806 } else {
3807 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3808 "Done %s.\n", __func__);
3810 if (wr)
3811 *wr = (uint64_t) mcp->mb[5] << 48 |
3812 (uint64_t) mcp->mb[4] << 32 |
3813 (uint64_t) mcp->mb[3] << 16 |
3814 (uint64_t) mcp->mb[2];
3815 if (rd)
3816 *rd = (uint64_t) mcp->mb[9] << 48 |
3817 (uint64_t) mcp->mb[8] << 32 |
3818 (uint64_t) mcp->mb[7] << 16 |
3819 (uint64_t) mcp->mb[6];
3822 return rval;
3826 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3827 uint16_t *port_speed, uint16_t *mb)
3829 int rval;
3830 mbx_cmd_t mc;
3831 mbx_cmd_t *mcp = &mc;
3833 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3834 "Entered %s.\n", __func__);
3836 if (!IS_IIDMA_CAPABLE(vha->hw))
3837 return QLA_FUNCTION_FAILED;
3839 mcp->mb[0] = MBC_PORT_PARAMS;
3840 mcp->mb[1] = loop_id;
3841 mcp->mb[2] = mcp->mb[3] = 0;
3842 mcp->mb[9] = vha->vp_idx;
3843 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3844 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3845 mcp->tov = MBX_TOV_SECONDS;
3846 mcp->flags = 0;
3847 rval = qla2x00_mailbox_command(vha, mcp);
3849 /* Return mailbox statuses. */
3850 if (mb) {
3851 mb[0] = mcp->mb[0];
3852 mb[1] = mcp->mb[1];
3853 mb[3] = mcp->mb[3];
3856 if (rval != QLA_SUCCESS) {
3857 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3858 } else {
3859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3860 "Done %s.\n", __func__);
3861 if (port_speed)
3862 *port_speed = mcp->mb[3];
3865 return rval;
3869 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3870 uint16_t port_speed, uint16_t *mb)
3872 int rval;
3873 mbx_cmd_t mc;
3874 mbx_cmd_t *mcp = &mc;
3876 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3877 "Entered %s.\n", __func__);
3879 if (!IS_IIDMA_CAPABLE(vha->hw))
3880 return QLA_FUNCTION_FAILED;
3882 mcp->mb[0] = MBC_PORT_PARAMS;
3883 mcp->mb[1] = loop_id;
3884 mcp->mb[2] = BIT_0;
3885 mcp->mb[3] = port_speed & 0x3F;
3886 mcp->mb[9] = vha->vp_idx;
3887 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3888 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3889 mcp->tov = MBX_TOV_SECONDS;
3890 mcp->flags = 0;
3891 rval = qla2x00_mailbox_command(vha, mcp);
3893 /* Return mailbox statuses. */
3894 if (mb) {
3895 mb[0] = mcp->mb[0];
3896 mb[1] = mcp->mb[1];
3897 mb[3] = mcp->mb[3];
3900 if (rval != QLA_SUCCESS) {
3901 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3902 "Failed=%x.\n", rval);
3903 } else {
3904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3905 "Done %s.\n", __func__);
3908 return rval;
3911 void
3912 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3913 struct vp_rpt_id_entry_24xx *rptid_entry)
3915 struct qla_hw_data *ha = vha->hw;
3916 scsi_qla_host_t *vp = NULL;
3917 unsigned long flags;
3918 int found;
3919 port_id_t id;
3920 struct fc_port *fcport;
3922 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3923 "Entered %s.\n", __func__);
3925 if (rptid_entry->entry_status != 0)
3926 return;
3928 id.b.domain = rptid_entry->port_id[2];
3929 id.b.area = rptid_entry->port_id[1];
3930 id.b.al_pa = rptid_entry->port_id[0];
3931 id.b.rsvd_1 = 0;
3932 ha->flags.n2n_ae = 0;
3934 if (rptid_entry->format == 0) {
3935 /* loop */
3936 ql_dbg(ql_dbg_async, vha, 0x10b7,
3937 "Format 0 : Number of VPs setup %d, number of "
3938 "VPs acquired %d.\n", rptid_entry->vp_setup,
3939 rptid_entry->vp_acquired);
3940 ql_dbg(ql_dbg_async, vha, 0x10b8,
3941 "Primary port id %02x%02x%02x.\n",
3942 rptid_entry->port_id[2], rptid_entry->port_id[1],
3943 rptid_entry->port_id[0]);
3944 ha->current_topology = ISP_CFG_NL;
3945 qlt_update_host_map(vha, id);
3947 } else if (rptid_entry->format == 1) {
3948 /* fabric */
3949 ql_dbg(ql_dbg_async, vha, 0x10b9,
3950 "Format 1: VP[%d] enabled - status %d - with "
3951 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3952 rptid_entry->vp_status,
3953 rptid_entry->port_id[2], rptid_entry->port_id[1],
3954 rptid_entry->port_id[0]);
3955 ql_dbg(ql_dbg_async, vha, 0x5075,
3956 "Format 1: Remote WWPN %8phC.\n",
3957 rptid_entry->u.f1.port_name);
3959 ql_dbg(ql_dbg_async, vha, 0x5075,
3960 "Format 1: WWPN %8phC.\n",
3961 vha->port_name);
3963 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3964 case TOPO_N2N:
3965 ha->current_topology = ISP_CFG_N;
3966 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3967 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3968 fcport->scan_state = QLA_FCPORT_SCAN;
3969 fcport->n2n_flag = 0;
3971 id.b24 = 0;
3972 if (wwn_to_u64(vha->port_name) >
3973 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3974 vha->d_id.b24 = 0;
3975 vha->d_id.b.al_pa = 1;
3976 ha->flags.n2n_bigger = 1;
3978 id.b.al_pa = 2;
3979 ql_dbg(ql_dbg_async, vha, 0x5075,
3980 "Format 1: assign local id %x remote id %x\n",
3981 vha->d_id.b24, id.b24);
3982 } else {
3983 ql_dbg(ql_dbg_async, vha, 0x5075,
3984 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3985 rptid_entry->u.f1.port_name);
3986 ha->flags.n2n_bigger = 0;
3989 fcport = qla2x00_find_fcport_by_wwpn(vha,
3990 rptid_entry->u.f1.port_name, 1);
3991 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3994 if (fcport) {
3995 fcport->plogi_nack_done_deadline = jiffies + HZ;
3996 fcport->dm_login_expire = jiffies +
3997 QLA_N2N_WAIT_TIME * HZ;
3998 fcport->scan_state = QLA_FCPORT_FOUND;
3999 fcport->n2n_flag = 1;
4000 fcport->keep_nport_handle = 1;
4002 if (wwn_to_u64(vha->port_name) >
4003 wwn_to_u64(fcport->port_name)) {
4004 fcport->d_id = id;
4007 switch (fcport->disc_state) {
4008 case DSC_DELETED:
4009 set_bit(RELOGIN_NEEDED,
4010 &vha->dpc_flags);
4011 break;
4012 case DSC_DELETE_PEND:
4013 break;
4014 default:
4015 qlt_schedule_sess_for_deletion(fcport);
4016 break;
4018 } else {
4019 qla24xx_post_newsess_work(vha, &id,
4020 rptid_entry->u.f1.port_name,
4021 rptid_entry->u.f1.node_name,
4022 NULL,
4023 FS_FCP_IS_N2N);
4026 /* if our portname is higher then initiate N2N login */
4028 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
4029 return;
4030 case TOPO_FL:
4031 ha->current_topology = ISP_CFG_FL;
4032 break;
4033 case TOPO_F:
4034 ha->current_topology = ISP_CFG_F;
4035 break;
4036 default:
4037 break;
4040 ha->flags.gpsc_supported = 1;
4041 ha->current_topology = ISP_CFG_F;
4042 /* buffer to buffer credit flag */
4043 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
4045 if (rptid_entry->vp_idx == 0) {
4046 if (rptid_entry->vp_status == VP_STAT_COMPL) {
4047 /* FA-WWN is only for physical port */
4048 if (qla_ini_mode_enabled(vha) &&
4049 ha->flags.fawwpn_enabled &&
4050 (rptid_entry->u.f1.flags &
4051 BIT_6)) {
4052 memcpy(vha->port_name,
4053 rptid_entry->u.f1.port_name,
4054 WWN_SIZE);
4057 qlt_update_host_map(vha, id);
4060 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
4061 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
4062 } else {
4063 if (rptid_entry->vp_status != VP_STAT_COMPL &&
4064 rptid_entry->vp_status != VP_STAT_ID_CHG) {
4065 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
4066 "Could not acquire ID for VP[%d].\n",
4067 rptid_entry->vp_idx);
4068 return;
4071 found = 0;
4072 spin_lock_irqsave(&ha->vport_slock, flags);
4073 list_for_each_entry(vp, &ha->vp_list, list) {
4074 if (rptid_entry->vp_idx == vp->vp_idx) {
4075 found = 1;
4076 break;
4079 spin_unlock_irqrestore(&ha->vport_slock, flags);
4081 if (!found)
4082 return;
4084 qlt_update_host_map(vp, id);
4087 * Cannot configure here as we are still sitting on the
4088 * response queue. Handle it in dpc context.
4090 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
4091 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
4092 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
4094 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
4095 qla2xxx_wake_dpc(vha);
4096 } else if (rptid_entry->format == 2) {
4097 ql_dbg(ql_dbg_async, vha, 0x505f,
4098 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
4099 rptid_entry->port_id[2], rptid_entry->port_id[1],
4100 rptid_entry->port_id[0]);
4102 ql_dbg(ql_dbg_async, vha, 0x5075,
4103 "N2N: Remote WWPN %8phC.\n",
4104 rptid_entry->u.f2.port_name);
4106 /* N2N. direct connect */
4107 ha->current_topology = ISP_CFG_N;
4108 ha->flags.rida_fmt2 = 1;
4109 vha->d_id.b.domain = rptid_entry->port_id[2];
4110 vha->d_id.b.area = rptid_entry->port_id[1];
4111 vha->d_id.b.al_pa = rptid_entry->port_id[0];
4113 ha->flags.n2n_ae = 1;
4114 spin_lock_irqsave(&ha->vport_slock, flags);
4115 qlt_update_vp_map(vha, SET_AL_PA);
4116 spin_unlock_irqrestore(&ha->vport_slock, flags);
4118 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4119 fcport->scan_state = QLA_FCPORT_SCAN;
4120 fcport->n2n_flag = 0;
4123 fcport = qla2x00_find_fcport_by_wwpn(vha,
4124 rptid_entry->u.f2.port_name, 1);
4126 if (fcport) {
4127 fcport->login_retry = vha->hw->login_retry_count;
4128 fcport->plogi_nack_done_deadline = jiffies + HZ;
4129 fcport->scan_state = QLA_FCPORT_FOUND;
4130 fcport->keep_nport_handle = 1;
4131 fcport->n2n_flag = 1;
4132 fcport->d_id.b.domain =
4133 rptid_entry->u.f2.remote_nport_id[2];
4134 fcport->d_id.b.area =
4135 rptid_entry->u.f2.remote_nport_id[1];
4136 fcport->d_id.b.al_pa =
4137 rptid_entry->u.f2.remote_nport_id[0];
4143 * qla24xx_modify_vp_config
4144 * Change VP configuration for vha
4146 * Input:
4147 * vha = adapter block pointer.
4149 * Returns:
4150 * qla2xxx local function return status code.
4152 * Context:
4153 * Kernel context.
4156 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4158 int rval;
4159 struct vp_config_entry_24xx *vpmod;
4160 dma_addr_t vpmod_dma;
4161 struct qla_hw_data *ha = vha->hw;
4162 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4164 /* This can be called by the parent */
4166 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4167 "Entered %s.\n", __func__);
4169 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4170 if (!vpmod) {
4171 ql_log(ql_log_warn, vha, 0x10bc,
4172 "Failed to allocate modify VP IOCB.\n");
4173 return QLA_MEMORY_ALLOC_FAILED;
4176 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4177 vpmod->entry_count = 1;
4178 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4179 vpmod->vp_count = 1;
4180 vpmod->vp_index1 = vha->vp_idx;
4181 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4183 qlt_modify_vp_config(vha, vpmod);
4185 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4186 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4187 vpmod->entry_count = 1;
4189 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4190 if (rval != QLA_SUCCESS) {
4191 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4192 "Failed to issue VP config IOCB (%x).\n", rval);
4193 } else if (vpmod->comp_status != 0) {
4194 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4195 "Failed to complete IOCB -- error status (%x).\n",
4196 vpmod->comp_status);
4197 rval = QLA_FUNCTION_FAILED;
4198 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4199 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4200 "Failed to complete IOCB -- completion status (%x).\n",
4201 le16_to_cpu(vpmod->comp_status));
4202 rval = QLA_FUNCTION_FAILED;
4203 } else {
4204 /* EMPTY */
4205 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4206 "Done %s.\n", __func__);
4207 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4209 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4211 return rval;
4215 * qla2x00_send_change_request
4216 * Receive or disable RSCN request from fabric controller
4218 * Input:
4219 * ha = adapter block pointer
4220 * format = registration format:
4221 * 0 - Reserved
4222 * 1 - Fabric detected registration
4223 * 2 - N_port detected registration
4224 * 3 - Full registration
4225 * FF - clear registration
4226 * vp_idx = Virtual port index
4228 * Returns:
4229 * qla2x00 local function return status code.
4231 * Context:
4232 * Kernel Context
4236 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4237 uint16_t vp_idx)
4239 int rval;
4240 mbx_cmd_t mc;
4241 mbx_cmd_t *mcp = &mc;
4243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4244 "Entered %s.\n", __func__);
4246 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4247 mcp->mb[1] = format;
4248 mcp->mb[9] = vp_idx;
4249 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4250 mcp->in_mb = MBX_0|MBX_1;
4251 mcp->tov = MBX_TOV_SECONDS;
4252 mcp->flags = 0;
4253 rval = qla2x00_mailbox_command(vha, mcp);
4255 if (rval == QLA_SUCCESS) {
4256 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4257 rval = BIT_1;
4259 } else
4260 rval = BIT_1;
4262 return rval;
4266 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4267 uint32_t size)
4269 int rval;
4270 mbx_cmd_t mc;
4271 mbx_cmd_t *mcp = &mc;
4273 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4274 "Entered %s.\n", __func__);
4276 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4277 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4278 mcp->mb[8] = MSW(addr);
4279 mcp->out_mb = MBX_8|MBX_0;
4280 } else {
4281 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4282 mcp->out_mb = MBX_0;
4284 mcp->mb[1] = LSW(addr);
4285 mcp->mb[2] = MSW(req_dma);
4286 mcp->mb[3] = LSW(req_dma);
4287 mcp->mb[6] = MSW(MSD(req_dma));
4288 mcp->mb[7] = LSW(MSD(req_dma));
4289 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4290 if (IS_FWI2_CAPABLE(vha->hw)) {
4291 mcp->mb[4] = MSW(size);
4292 mcp->mb[5] = LSW(size);
4293 mcp->out_mb |= MBX_5|MBX_4;
4294 } else {
4295 mcp->mb[4] = LSW(size);
4296 mcp->out_mb |= MBX_4;
4299 mcp->in_mb = MBX_0;
4300 mcp->tov = MBX_TOV_SECONDS;
4301 mcp->flags = 0;
4302 rval = qla2x00_mailbox_command(vha, mcp);
4304 if (rval != QLA_SUCCESS) {
4305 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4307 } else {
4308 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4309 "Done %s.\n", __func__);
4312 return rval;
4314 /* 84XX Support **************************************************************/
4316 struct cs84xx_mgmt_cmd {
4317 union {
4318 struct verify_chip_entry_84xx req;
4319 struct verify_chip_rsp_84xx rsp;
4320 } p;
4324 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4326 int rval, retry;
4327 struct cs84xx_mgmt_cmd *mn;
4328 dma_addr_t mn_dma;
4329 uint16_t options;
4330 unsigned long flags;
4331 struct qla_hw_data *ha = vha->hw;
4333 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4334 "Entered %s.\n", __func__);
4336 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4337 if (mn == NULL) {
4338 return QLA_MEMORY_ALLOC_FAILED;
4341 /* Force Update? */
4342 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4343 /* Diagnostic firmware? */
4344 /* options |= MENLO_DIAG_FW; */
4345 /* We update the firmware with only one data sequence. */
4346 options |= VCO_END_OF_DATA;
4348 do {
4349 retry = 0;
4350 memset(mn, 0, sizeof(*mn));
4351 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4352 mn->p.req.entry_count = 1;
4353 mn->p.req.options = cpu_to_le16(options);
4355 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4356 "Dump of Verify Request.\n");
4357 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4358 mn, sizeof(*mn));
4360 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4361 if (rval != QLA_SUCCESS) {
4362 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4363 "Failed to issue verify IOCB (%x).\n", rval);
4364 goto verify_done;
4367 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4368 "Dump of Verify Response.\n");
4369 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4370 mn, sizeof(*mn));
4372 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4373 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4374 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4376 "cs=%x fc=%x.\n", status[0], status[1]);
4378 if (status[0] != CS_COMPLETE) {
4379 rval = QLA_FUNCTION_FAILED;
4380 if (!(options & VCO_DONT_UPDATE_FW)) {
4381 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4382 "Firmware update failed. Retrying "
4383 "without update firmware.\n");
4384 options |= VCO_DONT_UPDATE_FW;
4385 options &= ~VCO_FORCE_UPDATE;
4386 retry = 1;
4388 } else {
4389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4390 "Firmware updated to %x.\n",
4391 le32_to_cpu(mn->p.rsp.fw_ver));
4393 /* NOTE: we only update OP firmware. */
4394 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4395 ha->cs84xx->op_fw_version =
4396 le32_to_cpu(mn->p.rsp.fw_ver);
4397 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4398 flags);
4400 } while (retry);
4402 verify_done:
4403 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4405 if (rval != QLA_SUCCESS) {
4406 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4407 "Failed=%x.\n", rval);
4408 } else {
4409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4410 "Done %s.\n", __func__);
4413 return rval;
4417 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4419 int rval;
4420 unsigned long flags;
4421 mbx_cmd_t mc;
4422 mbx_cmd_t *mcp = &mc;
4423 struct qla_hw_data *ha = vha->hw;
4425 if (!ha->flags.fw_started)
4426 return QLA_SUCCESS;
4428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4429 "Entered %s.\n", __func__);
4431 if (IS_SHADOW_REG_CAPABLE(ha))
4432 req->options |= BIT_13;
4434 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4435 mcp->mb[1] = req->options;
4436 mcp->mb[2] = MSW(LSD(req->dma));
4437 mcp->mb[3] = LSW(LSD(req->dma));
4438 mcp->mb[6] = MSW(MSD(req->dma));
4439 mcp->mb[7] = LSW(MSD(req->dma));
4440 mcp->mb[5] = req->length;
4441 if (req->rsp)
4442 mcp->mb[10] = req->rsp->id;
4443 mcp->mb[12] = req->qos;
4444 mcp->mb[11] = req->vp_idx;
4445 mcp->mb[13] = req->rid;
4446 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4447 mcp->mb[15] = 0;
4449 mcp->mb[4] = req->id;
4450 /* que in ptr index */
4451 mcp->mb[8] = 0;
4452 /* que out ptr index */
4453 mcp->mb[9] = *req->out_ptr = 0;
4454 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4455 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4456 mcp->in_mb = MBX_0;
4457 mcp->flags = MBX_DMA_OUT;
4458 mcp->tov = MBX_TOV_SECONDS * 2;
4460 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4461 IS_QLA28XX(ha))
4462 mcp->in_mb |= MBX_1;
4463 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4464 mcp->out_mb |= MBX_15;
4465 /* debug q create issue in SR-IOV */
4466 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4469 spin_lock_irqsave(&ha->hardware_lock, flags);
4470 if (!(req->options & BIT_0)) {
4471 wrt_reg_dword(req->req_q_in, 0);
4472 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4473 wrt_reg_dword(req->req_q_out, 0);
4475 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4477 rval = qla2x00_mailbox_command(vha, mcp);
4478 if (rval != QLA_SUCCESS) {
4479 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4481 } else {
4482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4483 "Done %s.\n", __func__);
4486 return rval;
4490 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4492 int rval;
4493 unsigned long flags;
4494 mbx_cmd_t mc;
4495 mbx_cmd_t *mcp = &mc;
4496 struct qla_hw_data *ha = vha->hw;
4498 if (!ha->flags.fw_started)
4499 return QLA_SUCCESS;
4501 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4502 "Entered %s.\n", __func__);
4504 if (IS_SHADOW_REG_CAPABLE(ha))
4505 rsp->options |= BIT_13;
4507 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4508 mcp->mb[1] = rsp->options;
4509 mcp->mb[2] = MSW(LSD(rsp->dma));
4510 mcp->mb[3] = LSW(LSD(rsp->dma));
4511 mcp->mb[6] = MSW(MSD(rsp->dma));
4512 mcp->mb[7] = LSW(MSD(rsp->dma));
4513 mcp->mb[5] = rsp->length;
4514 mcp->mb[14] = rsp->msix->entry;
4515 mcp->mb[13] = rsp->rid;
4516 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4517 mcp->mb[15] = 0;
4519 mcp->mb[4] = rsp->id;
4520 /* que in ptr index */
4521 mcp->mb[8] = *rsp->in_ptr = 0;
4522 /* que out ptr index */
4523 mcp->mb[9] = 0;
4524 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4525 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4526 mcp->in_mb = MBX_0;
4527 mcp->flags = MBX_DMA_OUT;
4528 mcp->tov = MBX_TOV_SECONDS * 2;
4530 if (IS_QLA81XX(ha)) {
4531 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4532 mcp->in_mb |= MBX_1;
4533 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4534 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4535 mcp->in_mb |= MBX_1;
4536 /* debug q create issue in SR-IOV */
4537 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4540 spin_lock_irqsave(&ha->hardware_lock, flags);
4541 if (!(rsp->options & BIT_0)) {
4542 wrt_reg_dword(rsp->rsp_q_out, 0);
4543 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4544 wrt_reg_dword(rsp->rsp_q_in, 0);
4547 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4549 rval = qla2x00_mailbox_command(vha, mcp);
4550 if (rval != QLA_SUCCESS) {
4551 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4552 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4553 } else {
4554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4555 "Done %s.\n", __func__);
4558 return rval;
4562 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4564 int rval;
4565 mbx_cmd_t mc;
4566 mbx_cmd_t *mcp = &mc;
4568 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4569 "Entered %s.\n", __func__);
4571 mcp->mb[0] = MBC_IDC_ACK;
4572 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4573 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4574 mcp->in_mb = MBX_0;
4575 mcp->tov = MBX_TOV_SECONDS;
4576 mcp->flags = 0;
4577 rval = qla2x00_mailbox_command(vha, mcp);
4579 if (rval != QLA_SUCCESS) {
4580 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4581 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4582 } else {
4583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4584 "Done %s.\n", __func__);
4587 return rval;
4591 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4593 int rval;
4594 mbx_cmd_t mc;
4595 mbx_cmd_t *mcp = &mc;
4597 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4598 "Entered %s.\n", __func__);
4600 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4601 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4602 return QLA_FUNCTION_FAILED;
4604 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4605 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4606 mcp->out_mb = MBX_1|MBX_0;
4607 mcp->in_mb = MBX_1|MBX_0;
4608 mcp->tov = MBX_TOV_SECONDS;
4609 mcp->flags = 0;
4610 rval = qla2x00_mailbox_command(vha, mcp);
4612 if (rval != QLA_SUCCESS) {
4613 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4614 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4615 rval, mcp->mb[0], mcp->mb[1]);
4616 } else {
4617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4618 "Done %s.\n", __func__);
4619 *sector_size = mcp->mb[1];
4622 return rval;
4626 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4628 int rval;
4629 mbx_cmd_t mc;
4630 mbx_cmd_t *mcp = &mc;
4632 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4633 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4634 return QLA_FUNCTION_FAILED;
4636 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4637 "Entered %s.\n", __func__);
4639 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4640 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4641 FAC_OPT_CMD_WRITE_PROTECT;
4642 mcp->out_mb = MBX_1|MBX_0;
4643 mcp->in_mb = MBX_1|MBX_0;
4644 mcp->tov = MBX_TOV_SECONDS;
4645 mcp->flags = 0;
4646 rval = qla2x00_mailbox_command(vha, mcp);
4648 if (rval != QLA_SUCCESS) {
4649 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4650 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4651 rval, mcp->mb[0], mcp->mb[1]);
4652 } else {
4653 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4654 "Done %s.\n", __func__);
4657 return rval;
4661 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4663 int rval;
4664 mbx_cmd_t mc;
4665 mbx_cmd_t *mcp = &mc;
4667 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4668 !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
4669 return QLA_FUNCTION_FAILED;
4671 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4672 "Entered %s.\n", __func__);
4674 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4675 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4676 mcp->mb[2] = LSW(start);
4677 mcp->mb[3] = MSW(start);
4678 mcp->mb[4] = LSW(finish);
4679 mcp->mb[5] = MSW(finish);
4680 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4681 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4682 mcp->tov = MBX_TOV_SECONDS;
4683 mcp->flags = 0;
4684 rval = qla2x00_mailbox_command(vha, mcp);
4686 if (rval != QLA_SUCCESS) {
4687 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4688 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4689 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4690 } else {
4691 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4692 "Done %s.\n", __func__);
4695 return rval;
4699 qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock)
4701 int rval = QLA_SUCCESS;
4702 mbx_cmd_t mc;
4703 mbx_cmd_t *mcp = &mc;
4704 struct qla_hw_data *ha = vha->hw;
4706 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4707 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4708 return rval;
4710 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4711 "Entered %s.\n", __func__);
4713 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4714 mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE :
4715 FAC_OPT_CMD_UNLOCK_SEMAPHORE);
4716 mcp->out_mb = MBX_1|MBX_0;
4717 mcp->in_mb = MBX_1|MBX_0;
4718 mcp->tov = MBX_TOV_SECONDS;
4719 mcp->flags = 0;
4720 rval = qla2x00_mailbox_command(vha, mcp);
4722 if (rval != QLA_SUCCESS) {
4723 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4724 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4725 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4726 } else {
4727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4728 "Done %s.\n", __func__);
4731 return rval;
4735 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4737 int rval = 0;
4738 mbx_cmd_t mc;
4739 mbx_cmd_t *mcp = &mc;
4741 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4742 "Entered %s.\n", __func__);
4744 mcp->mb[0] = MBC_RESTART_MPI_FW;
4745 mcp->out_mb = MBX_0;
4746 mcp->in_mb = MBX_0|MBX_1;
4747 mcp->tov = MBX_TOV_SECONDS;
4748 mcp->flags = 0;
4749 rval = qla2x00_mailbox_command(vha, mcp);
4751 if (rval != QLA_SUCCESS) {
4752 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4753 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4754 rval, mcp->mb[0], mcp->mb[1]);
4755 } else {
4756 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4757 "Done %s.\n", __func__);
4760 return rval;
4764 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4766 int rval;
4767 mbx_cmd_t mc;
4768 mbx_cmd_t *mcp = &mc;
4769 int i;
4770 int len;
4771 __le16 *str;
4772 struct qla_hw_data *ha = vha->hw;
4774 if (!IS_P3P_TYPE(ha))
4775 return QLA_FUNCTION_FAILED;
4777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4778 "Entered %s.\n", __func__);
4780 str = (__force __le16 *)version;
4781 len = strlen(version);
4783 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4784 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4785 mcp->out_mb = MBX_1|MBX_0;
4786 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4787 mcp->mb[i] = le16_to_cpup(str);
4788 mcp->out_mb |= 1<<i;
4790 for (; i < 16; i++) {
4791 mcp->mb[i] = 0;
4792 mcp->out_mb |= 1<<i;
4794 mcp->in_mb = MBX_1|MBX_0;
4795 mcp->tov = MBX_TOV_SECONDS;
4796 mcp->flags = 0;
4797 rval = qla2x00_mailbox_command(vha, mcp);
4799 if (rval != QLA_SUCCESS) {
4800 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4801 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4802 } else {
4803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4804 "Done %s.\n", __func__);
4807 return rval;
4811 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4813 int rval;
4814 mbx_cmd_t mc;
4815 mbx_cmd_t *mcp = &mc;
4816 int len;
4817 uint16_t dwlen;
4818 uint8_t *str;
4819 dma_addr_t str_dma;
4820 struct qla_hw_data *ha = vha->hw;
4822 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4823 IS_P3P_TYPE(ha))
4824 return QLA_FUNCTION_FAILED;
4826 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4827 "Entered %s.\n", __func__);
4829 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4830 if (!str) {
4831 ql_log(ql_log_warn, vha, 0x117f,
4832 "Failed to allocate driver version param.\n");
4833 return QLA_MEMORY_ALLOC_FAILED;
4836 memcpy(str, "\x7\x3\x11\x0", 4);
4837 dwlen = str[0];
4838 len = dwlen * 4 - 4;
4839 memset(str + 4, 0, len);
4840 if (len > strlen(version))
4841 len = strlen(version);
4842 memcpy(str + 4, version, len);
4844 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4845 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4846 mcp->mb[2] = MSW(LSD(str_dma));
4847 mcp->mb[3] = LSW(LSD(str_dma));
4848 mcp->mb[6] = MSW(MSD(str_dma));
4849 mcp->mb[7] = LSW(MSD(str_dma));
4850 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4851 mcp->in_mb = MBX_1|MBX_0;
4852 mcp->tov = MBX_TOV_SECONDS;
4853 mcp->flags = 0;
4854 rval = qla2x00_mailbox_command(vha, mcp);
4856 if (rval != QLA_SUCCESS) {
4857 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4858 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4859 } else {
4860 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4861 "Done %s.\n", __func__);
4864 dma_pool_free(ha->s_dma_pool, str, str_dma);
4866 return rval;
4870 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4871 void *buf, uint16_t bufsiz)
4873 int rval, i;
4874 mbx_cmd_t mc;
4875 mbx_cmd_t *mcp = &mc;
4876 uint32_t *bp;
4878 if (!IS_FWI2_CAPABLE(vha->hw))
4879 return QLA_FUNCTION_FAILED;
4881 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4882 "Entered %s.\n", __func__);
4884 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4885 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4886 mcp->mb[2] = MSW(buf_dma);
4887 mcp->mb[3] = LSW(buf_dma);
4888 mcp->mb[6] = MSW(MSD(buf_dma));
4889 mcp->mb[7] = LSW(MSD(buf_dma));
4890 mcp->mb[8] = bufsiz/4;
4891 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4892 mcp->in_mb = MBX_1|MBX_0;
4893 mcp->tov = MBX_TOV_SECONDS;
4894 mcp->flags = 0;
4895 rval = qla2x00_mailbox_command(vha, mcp);
4897 if (rval != QLA_SUCCESS) {
4898 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4899 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4900 } else {
4901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4902 "Done %s.\n", __func__);
4903 bp = (uint32_t *) buf;
4904 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4905 *bp = le32_to_cpu((__force __le32)*bp);
4908 return rval;
4911 #define PUREX_CMD_COUNT 2
4913 qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
4915 int rval;
4916 mbx_cmd_t mc;
4917 mbx_cmd_t *mcp = &mc;
4918 uint8_t *els_cmd_map;
4919 dma_addr_t els_cmd_map_dma;
4920 uint8_t cmd_opcode[PUREX_CMD_COUNT];
4921 uint8_t i, index, purex_bit;
4922 struct qla_hw_data *ha = vha->hw;
4924 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
4925 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4926 return QLA_SUCCESS;
4928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1197,
4929 "Entered %s.\n", __func__);
4931 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4932 &els_cmd_map_dma, GFP_KERNEL);
4933 if (!els_cmd_map) {
4934 ql_log(ql_log_warn, vha, 0x7101,
4935 "Failed to allocate RDP els command param.\n");
4936 return QLA_MEMORY_ALLOC_FAILED;
4939 /* List of Purex ELS */
4940 cmd_opcode[0] = ELS_FPIN;
4941 cmd_opcode[1] = ELS_RDP;
4943 for (i = 0; i < PUREX_CMD_COUNT; i++) {
4944 index = cmd_opcode[i] / 8;
4945 purex_bit = cmd_opcode[i] % 8;
4946 els_cmd_map[index] |= 1 << purex_bit;
4949 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4950 mcp->mb[1] = RNID_TYPE_ELS_CMD << 8;
4951 mcp->mb[2] = MSW(LSD(els_cmd_map_dma));
4952 mcp->mb[3] = LSW(LSD(els_cmd_map_dma));
4953 mcp->mb[6] = MSW(MSD(els_cmd_map_dma));
4954 mcp->mb[7] = LSW(MSD(els_cmd_map_dma));
4955 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4956 mcp->in_mb = MBX_1|MBX_0;
4957 mcp->tov = MBX_TOV_SECONDS;
4958 mcp->flags = MBX_DMA_OUT;
4959 mcp->buf_size = ELS_CMD_MAP_SIZE;
4960 rval = qla2x00_mailbox_command(vha, mcp);
4962 if (rval != QLA_SUCCESS) {
4963 ql_dbg(ql_dbg_mbx, vha, 0x118d,
4964 "Failed=%x (%x,%x).\n", rval, mcp->mb[0], mcp->mb[1]);
4965 } else {
4966 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
4967 "Done %s.\n", __func__);
4970 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
4971 els_cmd_map, els_cmd_map_dma);
4973 return rval;
4976 static int
4977 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4979 int rval;
4980 mbx_cmd_t mc;
4981 mbx_cmd_t *mcp = &mc;
4983 if (!IS_FWI2_CAPABLE(vha->hw))
4984 return QLA_FUNCTION_FAILED;
4986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4987 "Entered %s.\n", __func__);
4989 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4990 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4991 mcp->out_mb = MBX_1|MBX_0;
4992 mcp->in_mb = MBX_1|MBX_0;
4993 mcp->tov = MBX_TOV_SECONDS;
4994 mcp->flags = 0;
4995 rval = qla2x00_mailbox_command(vha, mcp);
4996 *temp = mcp->mb[1];
4998 if (rval != QLA_SUCCESS) {
4999 ql_dbg(ql_dbg_mbx, vha, 0x115a,
5000 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
5001 } else {
5002 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
5003 "Done %s.\n", __func__);
5006 return rval;
5010 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5011 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5013 int rval;
5014 mbx_cmd_t mc;
5015 mbx_cmd_t *mcp = &mc;
5016 struct qla_hw_data *ha = vha->hw;
5018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
5019 "Entered %s.\n", __func__);
5021 if (!IS_FWI2_CAPABLE(ha))
5022 return QLA_FUNCTION_FAILED;
5024 if (len == 1)
5025 opt |= BIT_0;
5027 mcp->mb[0] = MBC_READ_SFP;
5028 mcp->mb[1] = dev;
5029 mcp->mb[2] = MSW(LSD(sfp_dma));
5030 mcp->mb[3] = LSW(LSD(sfp_dma));
5031 mcp->mb[6] = MSW(MSD(sfp_dma));
5032 mcp->mb[7] = LSW(MSD(sfp_dma));
5033 mcp->mb[8] = len;
5034 mcp->mb[9] = off;
5035 mcp->mb[10] = opt;
5036 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5037 mcp->in_mb = MBX_1|MBX_0;
5038 mcp->tov = MBX_TOV_SECONDS;
5039 mcp->flags = 0;
5040 rval = qla2x00_mailbox_command(vha, mcp);
5042 if (opt & BIT_0)
5043 *sfp = mcp->mb[1];
5045 if (rval != QLA_SUCCESS) {
5046 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
5047 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5048 if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
5049 /* sfp is not there */
5050 rval = QLA_INTERFACE_ERROR;
5052 } else {
5053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
5054 "Done %s.\n", __func__);
5057 return rval;
5061 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
5062 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
5064 int rval;
5065 mbx_cmd_t mc;
5066 mbx_cmd_t *mcp = &mc;
5067 struct qla_hw_data *ha = vha->hw;
5069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
5070 "Entered %s.\n", __func__);
5072 if (!IS_FWI2_CAPABLE(ha))
5073 return QLA_FUNCTION_FAILED;
5075 if (len == 1)
5076 opt |= BIT_0;
5078 if (opt & BIT_0)
5079 len = *sfp;
5081 mcp->mb[0] = MBC_WRITE_SFP;
5082 mcp->mb[1] = dev;
5083 mcp->mb[2] = MSW(LSD(sfp_dma));
5084 mcp->mb[3] = LSW(LSD(sfp_dma));
5085 mcp->mb[6] = MSW(MSD(sfp_dma));
5086 mcp->mb[7] = LSW(MSD(sfp_dma));
5087 mcp->mb[8] = len;
5088 mcp->mb[9] = off;
5089 mcp->mb[10] = opt;
5090 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5091 mcp->in_mb = MBX_1|MBX_0;
5092 mcp->tov = MBX_TOV_SECONDS;
5093 mcp->flags = 0;
5094 rval = qla2x00_mailbox_command(vha, mcp);
5096 if (rval != QLA_SUCCESS) {
5097 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
5098 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5099 } else {
5100 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
5101 "Done %s.\n", __func__);
5104 return rval;
5108 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
5109 uint16_t size_in_bytes, uint16_t *actual_size)
5111 int rval;
5112 mbx_cmd_t mc;
5113 mbx_cmd_t *mcp = &mc;
5115 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
5116 "Entered %s.\n", __func__);
5118 if (!IS_CNA_CAPABLE(vha->hw))
5119 return QLA_FUNCTION_FAILED;
5121 mcp->mb[0] = MBC_GET_XGMAC_STATS;
5122 mcp->mb[2] = MSW(stats_dma);
5123 mcp->mb[3] = LSW(stats_dma);
5124 mcp->mb[6] = MSW(MSD(stats_dma));
5125 mcp->mb[7] = LSW(MSD(stats_dma));
5126 mcp->mb[8] = size_in_bytes >> 2;
5127 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
5128 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5129 mcp->tov = MBX_TOV_SECONDS;
5130 mcp->flags = 0;
5131 rval = qla2x00_mailbox_command(vha, mcp);
5133 if (rval != QLA_SUCCESS) {
5134 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
5135 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5136 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5137 } else {
5138 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
5139 "Done %s.\n", __func__);
5142 *actual_size = mcp->mb[2] << 2;
5145 return rval;
5149 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
5150 uint16_t size)
5152 int rval;
5153 mbx_cmd_t mc;
5154 mbx_cmd_t *mcp = &mc;
5156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
5157 "Entered %s.\n", __func__);
5159 if (!IS_CNA_CAPABLE(vha->hw))
5160 return QLA_FUNCTION_FAILED;
5162 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
5163 mcp->mb[1] = 0;
5164 mcp->mb[2] = MSW(tlv_dma);
5165 mcp->mb[3] = LSW(tlv_dma);
5166 mcp->mb[6] = MSW(MSD(tlv_dma));
5167 mcp->mb[7] = LSW(MSD(tlv_dma));
5168 mcp->mb[8] = size;
5169 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5170 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5171 mcp->tov = MBX_TOV_SECONDS;
5172 mcp->flags = 0;
5173 rval = qla2x00_mailbox_command(vha, mcp);
5175 if (rval != QLA_SUCCESS) {
5176 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
5177 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
5178 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
5179 } else {
5180 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
5181 "Done %s.\n", __func__);
5184 return rval;
5188 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
5190 int rval;
5191 mbx_cmd_t mc;
5192 mbx_cmd_t *mcp = &mc;
5194 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
5195 "Entered %s.\n", __func__);
5197 if (!IS_FWI2_CAPABLE(vha->hw))
5198 return QLA_FUNCTION_FAILED;
5200 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
5201 mcp->mb[1] = LSW(risc_addr);
5202 mcp->mb[8] = MSW(risc_addr);
5203 mcp->out_mb = MBX_8|MBX_1|MBX_0;
5204 mcp->in_mb = MBX_3|MBX_2|MBX_0;
5205 mcp->tov = MBX_TOV_SECONDS;
5206 mcp->flags = 0;
5207 rval = qla2x00_mailbox_command(vha, mcp);
5208 if (rval != QLA_SUCCESS) {
5209 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
5210 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5211 } else {
5212 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
5213 "Done %s.\n", __func__);
5214 *data = mcp->mb[3] << 16 | mcp->mb[2];
5217 return rval;
5221 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5222 uint16_t *mresp)
5224 int rval;
5225 mbx_cmd_t mc;
5226 mbx_cmd_t *mcp = &mc;
5228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5229 "Entered %s.\n", __func__);
5231 memset(mcp->mb, 0 , sizeof(mcp->mb));
5232 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5233 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5235 /* transfer count */
5236 mcp->mb[10] = LSW(mreq->transfer_size);
5237 mcp->mb[11] = MSW(mreq->transfer_size);
5239 /* send data address */
5240 mcp->mb[14] = LSW(mreq->send_dma);
5241 mcp->mb[15] = MSW(mreq->send_dma);
5242 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5243 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5245 /* receive data address */
5246 mcp->mb[16] = LSW(mreq->rcv_dma);
5247 mcp->mb[17] = MSW(mreq->rcv_dma);
5248 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5249 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5251 /* Iteration count */
5252 mcp->mb[18] = LSW(mreq->iteration_count);
5253 mcp->mb[19] = MSW(mreq->iteration_count);
5255 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5256 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5257 if (IS_CNA_CAPABLE(vha->hw))
5258 mcp->out_mb |= MBX_2;
5259 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5261 mcp->buf_size = mreq->transfer_size;
5262 mcp->tov = MBX_TOV_SECONDS;
5263 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5265 rval = qla2x00_mailbox_command(vha, mcp);
5267 if (rval != QLA_SUCCESS) {
5268 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5269 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5270 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5271 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5272 } else {
5273 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5274 "Done %s.\n", __func__);
5277 /* Copy mailbox information */
5278 memcpy( mresp, mcp->mb, 64);
5279 return rval;
5283 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5284 uint16_t *mresp)
5286 int rval;
5287 mbx_cmd_t mc;
5288 mbx_cmd_t *mcp = &mc;
5289 struct qla_hw_data *ha = vha->hw;
5291 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5292 "Entered %s.\n", __func__);
5294 memset(mcp->mb, 0 , sizeof(mcp->mb));
5295 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5296 /* BIT_6 specifies 64bit address */
5297 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5298 if (IS_CNA_CAPABLE(ha)) {
5299 mcp->mb[2] = vha->fcoe_fcf_idx;
5301 mcp->mb[16] = LSW(mreq->rcv_dma);
5302 mcp->mb[17] = MSW(mreq->rcv_dma);
5303 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5304 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5306 mcp->mb[10] = LSW(mreq->transfer_size);
5308 mcp->mb[14] = LSW(mreq->send_dma);
5309 mcp->mb[15] = MSW(mreq->send_dma);
5310 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5311 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5313 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5314 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5315 if (IS_CNA_CAPABLE(ha))
5316 mcp->out_mb |= MBX_2;
5318 mcp->in_mb = MBX_0;
5319 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5320 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5321 mcp->in_mb |= MBX_1;
5322 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5323 IS_QLA28XX(ha))
5324 mcp->in_mb |= MBX_3;
5326 mcp->tov = MBX_TOV_SECONDS;
5327 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5328 mcp->buf_size = mreq->transfer_size;
5330 rval = qla2x00_mailbox_command(vha, mcp);
5332 if (rval != QLA_SUCCESS) {
5333 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5334 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5335 rval, mcp->mb[0], mcp->mb[1]);
5336 } else {
5337 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5338 "Done %s.\n", __func__);
5341 /* Copy mailbox information */
5342 memcpy(mresp, mcp->mb, 64);
5343 return rval;
5347 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5349 int rval;
5350 mbx_cmd_t mc;
5351 mbx_cmd_t *mcp = &mc;
5353 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5354 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5356 mcp->mb[0] = MBC_ISP84XX_RESET;
5357 mcp->mb[1] = enable_diagnostic;
5358 mcp->out_mb = MBX_1|MBX_0;
5359 mcp->in_mb = MBX_1|MBX_0;
5360 mcp->tov = MBX_TOV_SECONDS;
5361 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5362 rval = qla2x00_mailbox_command(vha, mcp);
5364 if (rval != QLA_SUCCESS)
5365 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5366 else
5367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5368 "Done %s.\n", __func__);
5370 return rval;
5374 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5376 int rval;
5377 mbx_cmd_t mc;
5378 mbx_cmd_t *mcp = &mc;
5380 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5381 "Entered %s.\n", __func__);
5383 if (!IS_FWI2_CAPABLE(vha->hw))
5384 return QLA_FUNCTION_FAILED;
5386 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5387 mcp->mb[1] = LSW(risc_addr);
5388 mcp->mb[2] = LSW(data);
5389 mcp->mb[3] = MSW(data);
5390 mcp->mb[8] = MSW(risc_addr);
5391 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5392 mcp->in_mb = MBX_1|MBX_0;
5393 mcp->tov = MBX_TOV_SECONDS;
5394 mcp->flags = 0;
5395 rval = qla2x00_mailbox_command(vha, mcp);
5396 if (rval != QLA_SUCCESS) {
5397 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5398 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5399 rval, mcp->mb[0], mcp->mb[1]);
5400 } else {
5401 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5402 "Done %s.\n", __func__);
5405 return rval;
5409 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5411 int rval;
5412 uint32_t stat, timer;
5413 uint16_t mb0 = 0;
5414 struct qla_hw_data *ha = vha->hw;
5415 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5417 rval = QLA_SUCCESS;
5419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5420 "Entered %s.\n", __func__);
5422 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5424 /* Write the MBC data to the registers */
5425 wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5426 wrt_reg_word(&reg->mailbox1, mb[0]);
5427 wrt_reg_word(&reg->mailbox2, mb[1]);
5428 wrt_reg_word(&reg->mailbox3, mb[2]);
5429 wrt_reg_word(&reg->mailbox4, mb[3]);
5431 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
5433 /* Poll for MBC interrupt */
5434 for (timer = 6000000; timer; timer--) {
5435 /* Check for pending interrupts. */
5436 stat = rd_reg_dword(&reg->host_status);
5437 if (stat & HSRX_RISC_INT) {
5438 stat &= 0xff;
5440 if (stat == 0x1 || stat == 0x2 ||
5441 stat == 0x10 || stat == 0x11) {
5442 set_bit(MBX_INTERRUPT,
5443 &ha->mbx_cmd_flags);
5444 mb0 = rd_reg_word(&reg->mailbox0);
5445 wrt_reg_dword(&reg->hccr,
5446 HCCRX_CLR_RISC_INT);
5447 rd_reg_dword(&reg->hccr);
5448 break;
5451 udelay(5);
5454 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5455 rval = mb0 & MBS_MASK;
5456 else
5457 rval = QLA_FUNCTION_FAILED;
5459 if (rval != QLA_SUCCESS) {
5460 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5461 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5462 } else {
5463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5464 "Done %s.\n", __func__);
5467 return rval;
5470 /* Set the specified data rate */
5472 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5474 int rval;
5475 mbx_cmd_t mc;
5476 mbx_cmd_t *mcp = &mc;
5477 struct qla_hw_data *ha = vha->hw;
5478 uint16_t val;
5480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5481 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5482 mode);
5484 if (!IS_FWI2_CAPABLE(ha))
5485 return QLA_FUNCTION_FAILED;
5487 memset(mcp, 0, sizeof(*mcp));
5488 switch (ha->set_data_rate) {
5489 case PORT_SPEED_AUTO:
5490 case PORT_SPEED_4GB:
5491 case PORT_SPEED_8GB:
5492 case PORT_SPEED_16GB:
5493 case PORT_SPEED_32GB:
5494 val = ha->set_data_rate;
5495 break;
5496 default:
5497 ql_log(ql_log_warn, vha, 0x1199,
5498 "Unrecognized speed setting:%d. Setting Autoneg\n",
5499 ha->set_data_rate);
5500 val = ha->set_data_rate = PORT_SPEED_AUTO;
5501 break;
5504 mcp->mb[0] = MBC_DATA_RATE;
5505 mcp->mb[1] = mode;
5506 mcp->mb[2] = val;
5508 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5509 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5510 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5511 mcp->in_mb |= MBX_4|MBX_3;
5512 mcp->tov = MBX_TOV_SECONDS;
5513 mcp->flags = 0;
5514 rval = qla2x00_mailbox_command(vha, mcp);
5515 if (rval != QLA_SUCCESS) {
5516 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5517 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5518 } else {
5519 if (mcp->mb[1] != 0x7)
5520 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5521 "Speed set:0x%x\n", mcp->mb[1]);
5523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5524 "Done %s.\n", __func__);
5527 return rval;
5531 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5533 int rval;
5534 mbx_cmd_t mc;
5535 mbx_cmd_t *mcp = &mc;
5536 struct qla_hw_data *ha = vha->hw;
5538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5539 "Entered %s.\n", __func__);
5541 if (!IS_FWI2_CAPABLE(ha))
5542 return QLA_FUNCTION_FAILED;
5544 mcp->mb[0] = MBC_DATA_RATE;
5545 mcp->mb[1] = QLA_GET_DATA_RATE;
5546 mcp->out_mb = MBX_1|MBX_0;
5547 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5548 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5549 mcp->in_mb |= MBX_3;
5550 mcp->tov = MBX_TOV_SECONDS;
5551 mcp->flags = 0;
5552 rval = qla2x00_mailbox_command(vha, mcp);
5553 if (rval != QLA_SUCCESS) {
5554 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5555 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5556 } else {
5557 if (mcp->mb[1] != 0x7)
5558 ha->link_data_rate = mcp->mb[1];
5560 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5561 if (mcp->mb[4] & BIT_0)
5562 ql_log(ql_log_info, vha, 0x11a2,
5563 "FEC=enabled (data rate).\n");
5566 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5567 "Done %s.\n", __func__);
5568 if (mcp->mb[1] != 0x7)
5569 ha->link_data_rate = mcp->mb[1];
5572 return rval;
5576 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5578 int rval;
5579 mbx_cmd_t mc;
5580 mbx_cmd_t *mcp = &mc;
5581 struct qla_hw_data *ha = vha->hw;
5583 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5584 "Entered %s.\n", __func__);
5586 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5587 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5588 return QLA_FUNCTION_FAILED;
5589 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5590 mcp->out_mb = MBX_0;
5591 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5592 mcp->tov = MBX_TOV_SECONDS;
5593 mcp->flags = 0;
5595 rval = qla2x00_mailbox_command(vha, mcp);
5597 if (rval != QLA_SUCCESS) {
5598 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5599 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5600 } else {
5601 /* Copy all bits to preserve original value */
5602 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5604 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5605 "Done %s.\n", __func__);
5607 return rval;
5611 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5613 int rval;
5614 mbx_cmd_t mc;
5615 mbx_cmd_t *mcp = &mc;
5617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5618 "Entered %s.\n", __func__);
5620 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5621 /* Copy all bits to preserve original setting */
5622 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5623 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5624 mcp->in_mb = MBX_0;
5625 mcp->tov = MBX_TOV_SECONDS;
5626 mcp->flags = 0;
5627 rval = qla2x00_mailbox_command(vha, mcp);
5629 if (rval != QLA_SUCCESS) {
5630 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5631 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5632 } else
5633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5634 "Done %s.\n", __func__);
5636 return rval;
5641 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5642 uint16_t *mb)
5644 int rval;
5645 mbx_cmd_t mc;
5646 mbx_cmd_t *mcp = &mc;
5647 struct qla_hw_data *ha = vha->hw;
5649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5650 "Entered %s.\n", __func__);
5652 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5653 return QLA_FUNCTION_FAILED;
5655 mcp->mb[0] = MBC_PORT_PARAMS;
5656 mcp->mb[1] = loop_id;
5657 if (ha->flags.fcp_prio_enabled)
5658 mcp->mb[2] = BIT_1;
5659 else
5660 mcp->mb[2] = BIT_2;
5661 mcp->mb[4] = priority & 0xf;
5662 mcp->mb[9] = vha->vp_idx;
5663 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5664 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5665 mcp->tov = MBX_TOV_SECONDS;
5666 mcp->flags = 0;
5667 rval = qla2x00_mailbox_command(vha, mcp);
5668 if (mb != NULL) {
5669 mb[0] = mcp->mb[0];
5670 mb[1] = mcp->mb[1];
5671 mb[3] = mcp->mb[3];
5672 mb[4] = mcp->mb[4];
5675 if (rval != QLA_SUCCESS) {
5676 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5677 } else {
5678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5679 "Done %s.\n", __func__);
5682 return rval;
5686 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5688 int rval = QLA_FUNCTION_FAILED;
5689 struct qla_hw_data *ha = vha->hw;
5690 uint8_t byte;
5692 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5693 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5694 "Thermal not supported by this card.\n");
5695 return rval;
5698 if (IS_QLA25XX(ha)) {
5699 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5700 ha->pdev->subsystem_device == 0x0175) {
5701 rval = qla2x00_read_sfp(vha, 0, &byte,
5702 0x98, 0x1, 1, BIT_13|BIT_0);
5703 *temp = byte;
5704 return rval;
5706 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5707 ha->pdev->subsystem_device == 0x338e) {
5708 rval = qla2x00_read_sfp(vha, 0, &byte,
5709 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5710 *temp = byte;
5711 return rval;
5713 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5714 "Thermal not supported by this card.\n");
5715 return rval;
5718 if (IS_QLA82XX(ha)) {
5719 *temp = qla82xx_read_temperature(vha);
5720 rval = QLA_SUCCESS;
5721 return rval;
5722 } else if (IS_QLA8044(ha)) {
5723 *temp = qla8044_read_temperature(vha);
5724 rval = QLA_SUCCESS;
5725 return rval;
5728 rval = qla2x00_read_asic_temperature(vha, temp);
5729 return rval;
5733 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5735 int rval;
5736 struct qla_hw_data *ha = vha->hw;
5737 mbx_cmd_t mc;
5738 mbx_cmd_t *mcp = &mc;
5740 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5741 "Entered %s.\n", __func__);
5743 if (!IS_FWI2_CAPABLE(ha))
5744 return QLA_FUNCTION_FAILED;
5746 memset(mcp, 0, sizeof(mbx_cmd_t));
5747 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5748 mcp->mb[1] = 1;
5750 mcp->out_mb = MBX_1|MBX_0;
5751 mcp->in_mb = MBX_0;
5752 mcp->tov = MBX_TOV_SECONDS;
5753 mcp->flags = 0;
5755 rval = qla2x00_mailbox_command(vha, mcp);
5756 if (rval != QLA_SUCCESS) {
5757 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5758 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5759 } else {
5760 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5761 "Done %s.\n", __func__);
5764 return rval;
5768 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5770 int rval;
5771 struct qla_hw_data *ha = vha->hw;
5772 mbx_cmd_t mc;
5773 mbx_cmd_t *mcp = &mc;
5775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5776 "Entered %s.\n", __func__);
5778 if (!IS_P3P_TYPE(ha))
5779 return QLA_FUNCTION_FAILED;
5781 memset(mcp, 0, sizeof(mbx_cmd_t));
5782 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5783 mcp->mb[1] = 0;
5785 mcp->out_mb = MBX_1|MBX_0;
5786 mcp->in_mb = MBX_0;
5787 mcp->tov = MBX_TOV_SECONDS;
5788 mcp->flags = 0;
5790 rval = qla2x00_mailbox_command(vha, mcp);
5791 if (rval != QLA_SUCCESS) {
5792 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5793 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5794 } else {
5795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5796 "Done %s.\n", __func__);
5799 return rval;
5803 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5805 struct qla_hw_data *ha = vha->hw;
5806 mbx_cmd_t mc;
5807 mbx_cmd_t *mcp = &mc;
5808 int rval = QLA_FUNCTION_FAILED;
5810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5811 "Entered %s.\n", __func__);
5813 memset(mcp->mb, 0 , sizeof(mcp->mb));
5814 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5815 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5816 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5817 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5819 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5820 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5821 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5823 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5824 mcp->tov = MBX_TOV_SECONDS;
5825 rval = qla2x00_mailbox_command(vha, mcp);
5827 /* Always copy back return mailbox values. */
5828 if (rval != QLA_SUCCESS) {
5829 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5830 "mailbox command FAILED=0x%x, subcode=%x.\n",
5831 (mcp->mb[1] << 16) | mcp->mb[0],
5832 (mcp->mb[3] << 16) | mcp->mb[2]);
5833 } else {
5834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5835 "Done %s.\n", __func__);
5836 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5837 if (!ha->md_template_size) {
5838 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5839 "Null template size obtained.\n");
5840 rval = QLA_FUNCTION_FAILED;
5843 return rval;
5847 qla82xx_md_get_template(scsi_qla_host_t *vha)
5849 struct qla_hw_data *ha = vha->hw;
5850 mbx_cmd_t mc;
5851 mbx_cmd_t *mcp = &mc;
5852 int rval = QLA_FUNCTION_FAILED;
5854 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5855 "Entered %s.\n", __func__);
5857 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5858 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5859 if (!ha->md_tmplt_hdr) {
5860 ql_log(ql_log_warn, vha, 0x1124,
5861 "Unable to allocate memory for Minidump template.\n");
5862 return rval;
5865 memset(mcp->mb, 0 , sizeof(mcp->mb));
5866 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5867 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5868 mcp->mb[2] = LSW(RQST_TMPLT);
5869 mcp->mb[3] = MSW(RQST_TMPLT);
5870 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5871 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5872 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5873 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5874 mcp->mb[8] = LSW(ha->md_template_size);
5875 mcp->mb[9] = MSW(ha->md_template_size);
5877 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5878 mcp->tov = MBX_TOV_SECONDS;
5879 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5880 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5881 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5882 rval = qla2x00_mailbox_command(vha, mcp);
5884 if (rval != QLA_SUCCESS) {
5885 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5886 "mailbox command FAILED=0x%x, subcode=%x.\n",
5887 ((mcp->mb[1] << 16) | mcp->mb[0]),
5888 ((mcp->mb[3] << 16) | mcp->mb[2]));
5889 } else
5890 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5891 "Done %s.\n", __func__);
5892 return rval;
5896 qla8044_md_get_template(scsi_qla_host_t *vha)
5898 struct qla_hw_data *ha = vha->hw;
5899 mbx_cmd_t mc;
5900 mbx_cmd_t *mcp = &mc;
5901 int rval = QLA_FUNCTION_FAILED;
5902 int offset = 0, size = MINIDUMP_SIZE_36K;
5904 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5905 "Entered %s.\n", __func__);
5907 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5908 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5909 if (!ha->md_tmplt_hdr) {
5910 ql_log(ql_log_warn, vha, 0xb11b,
5911 "Unable to allocate memory for Minidump template.\n");
5912 return rval;
5915 memset(mcp->mb, 0 , sizeof(mcp->mb));
5916 while (offset < ha->md_template_size) {
5917 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5918 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5919 mcp->mb[2] = LSW(RQST_TMPLT);
5920 mcp->mb[3] = MSW(RQST_TMPLT);
5921 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5922 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5923 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5924 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5925 mcp->mb[8] = LSW(size);
5926 mcp->mb[9] = MSW(size);
5927 mcp->mb[10] = offset & 0x0000FFFF;
5928 mcp->mb[11] = offset & 0xFFFF0000;
5929 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5930 mcp->tov = MBX_TOV_SECONDS;
5931 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5932 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5933 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5934 rval = qla2x00_mailbox_command(vha, mcp);
5936 if (rval != QLA_SUCCESS) {
5937 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5938 "mailbox command FAILED=0x%x, subcode=%x.\n",
5939 ((mcp->mb[1] << 16) | mcp->mb[0]),
5940 ((mcp->mb[3] << 16) | mcp->mb[2]));
5941 return rval;
5942 } else
5943 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5944 "Done %s.\n", __func__);
5945 offset = offset + size;
5947 return rval;
5951 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5953 int rval;
5954 struct qla_hw_data *ha = vha->hw;
5955 mbx_cmd_t mc;
5956 mbx_cmd_t *mcp = &mc;
5958 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5959 return QLA_FUNCTION_FAILED;
5961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5962 "Entered %s.\n", __func__);
5964 memset(mcp, 0, sizeof(mbx_cmd_t));
5965 mcp->mb[0] = MBC_SET_LED_CONFIG;
5966 mcp->mb[1] = led_cfg[0];
5967 mcp->mb[2] = led_cfg[1];
5968 if (IS_QLA8031(ha)) {
5969 mcp->mb[3] = led_cfg[2];
5970 mcp->mb[4] = led_cfg[3];
5971 mcp->mb[5] = led_cfg[4];
5972 mcp->mb[6] = led_cfg[5];
5975 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5976 if (IS_QLA8031(ha))
5977 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5978 mcp->in_mb = MBX_0;
5979 mcp->tov = MBX_TOV_SECONDS;
5980 mcp->flags = 0;
5982 rval = qla2x00_mailbox_command(vha, mcp);
5983 if (rval != QLA_SUCCESS) {
5984 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5985 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5986 } else {
5987 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5988 "Done %s.\n", __func__);
5991 return rval;
5995 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5997 int rval;
5998 struct qla_hw_data *ha = vha->hw;
5999 mbx_cmd_t mc;
6000 mbx_cmd_t *mcp = &mc;
6002 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6003 return QLA_FUNCTION_FAILED;
6005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
6006 "Entered %s.\n", __func__);
6008 memset(mcp, 0, sizeof(mbx_cmd_t));
6009 mcp->mb[0] = MBC_GET_LED_CONFIG;
6011 mcp->out_mb = MBX_0;
6012 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6013 if (IS_QLA8031(ha))
6014 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
6015 mcp->tov = MBX_TOV_SECONDS;
6016 mcp->flags = 0;
6018 rval = qla2x00_mailbox_command(vha, mcp);
6019 if (rval != QLA_SUCCESS) {
6020 ql_dbg(ql_dbg_mbx, vha, 0x1137,
6021 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6022 } else {
6023 led_cfg[0] = mcp->mb[1];
6024 led_cfg[1] = mcp->mb[2];
6025 if (IS_QLA8031(ha)) {
6026 led_cfg[2] = mcp->mb[3];
6027 led_cfg[3] = mcp->mb[4];
6028 led_cfg[4] = mcp->mb[5];
6029 led_cfg[5] = mcp->mb[6];
6031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
6032 "Done %s.\n", __func__);
6035 return rval;
6039 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
6041 int rval;
6042 struct qla_hw_data *ha = vha->hw;
6043 mbx_cmd_t mc;
6044 mbx_cmd_t *mcp = &mc;
6046 if (!IS_P3P_TYPE(ha))
6047 return QLA_FUNCTION_FAILED;
6049 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
6050 "Entered %s.\n", __func__);
6052 memset(mcp, 0, sizeof(mbx_cmd_t));
6053 mcp->mb[0] = MBC_SET_LED_CONFIG;
6054 if (enable)
6055 mcp->mb[7] = 0xE;
6056 else
6057 mcp->mb[7] = 0xD;
6059 mcp->out_mb = MBX_7|MBX_0;
6060 mcp->in_mb = MBX_0;
6061 mcp->tov = MBX_TOV_SECONDS;
6062 mcp->flags = 0;
6064 rval = qla2x00_mailbox_command(vha, mcp);
6065 if (rval != QLA_SUCCESS) {
6066 ql_dbg(ql_dbg_mbx, vha, 0x1128,
6067 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6068 } else {
6069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
6070 "Done %s.\n", __func__);
6073 return rval;
6077 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
6079 int rval;
6080 struct qla_hw_data *ha = vha->hw;
6081 mbx_cmd_t mc;
6082 mbx_cmd_t *mcp = &mc;
6084 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6085 return QLA_FUNCTION_FAILED;
6087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
6088 "Entered %s.\n", __func__);
6090 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6091 mcp->mb[1] = LSW(reg);
6092 mcp->mb[2] = MSW(reg);
6093 mcp->mb[3] = LSW(data);
6094 mcp->mb[4] = MSW(data);
6095 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6097 mcp->in_mb = MBX_1|MBX_0;
6098 mcp->tov = MBX_TOV_SECONDS;
6099 mcp->flags = 0;
6100 rval = qla2x00_mailbox_command(vha, mcp);
6102 if (rval != QLA_SUCCESS) {
6103 ql_dbg(ql_dbg_mbx, vha, 0x1131,
6104 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6105 } else {
6106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
6107 "Done %s.\n", __func__);
6110 return rval;
6114 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
6116 int rval;
6117 struct qla_hw_data *ha = vha->hw;
6118 mbx_cmd_t mc;
6119 mbx_cmd_t *mcp = &mc;
6121 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6122 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
6123 "Implicit LOGO Unsupported.\n");
6124 return QLA_FUNCTION_FAILED;
6128 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
6129 "Entering %s.\n", __func__);
6131 /* Perform Implicit LOGO. */
6132 mcp->mb[0] = MBC_PORT_LOGOUT;
6133 mcp->mb[1] = fcport->loop_id;
6134 mcp->mb[10] = BIT_15;
6135 mcp->out_mb = MBX_10|MBX_1|MBX_0;
6136 mcp->in_mb = MBX_0;
6137 mcp->tov = MBX_TOV_SECONDS;
6138 mcp->flags = 0;
6139 rval = qla2x00_mailbox_command(vha, mcp);
6140 if (rval != QLA_SUCCESS)
6141 ql_dbg(ql_dbg_mbx, vha, 0x113d,
6142 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6143 else
6144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
6145 "Done %s.\n", __func__);
6147 return rval;
6151 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
6153 int rval;
6154 mbx_cmd_t mc;
6155 mbx_cmd_t *mcp = &mc;
6156 struct qla_hw_data *ha = vha->hw;
6157 unsigned long retry_max_time = jiffies + (2 * HZ);
6159 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6160 return QLA_FUNCTION_FAILED;
6162 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
6164 retry_rd_reg:
6165 mcp->mb[0] = MBC_READ_REMOTE_REG;
6166 mcp->mb[1] = LSW(reg);
6167 mcp->mb[2] = MSW(reg);
6168 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6169 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
6170 mcp->tov = MBX_TOV_SECONDS;
6171 mcp->flags = 0;
6172 rval = qla2x00_mailbox_command(vha, mcp);
6174 if (rval != QLA_SUCCESS) {
6175 ql_dbg(ql_dbg_mbx, vha, 0x114c,
6176 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6177 rval, mcp->mb[0], mcp->mb[1]);
6178 } else {
6179 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
6180 if (*data == QLA8XXX_BAD_VALUE) {
6182 * During soft-reset CAMRAM register reads might
6183 * return 0xbad0bad0. So retry for MAX of 2 sec
6184 * while reading camram registers.
6186 if (time_after(jiffies, retry_max_time)) {
6187 ql_dbg(ql_dbg_mbx, vha, 0x1141,
6188 "Failure to read CAMRAM register. "
6189 "data=0x%x.\n", *data);
6190 return QLA_FUNCTION_FAILED;
6192 msleep(100);
6193 goto retry_rd_reg;
6195 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
6198 return rval;
6202 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
6204 int rval;
6205 mbx_cmd_t mc;
6206 mbx_cmd_t *mcp = &mc;
6207 struct qla_hw_data *ha = vha->hw;
6209 if (!IS_QLA83XX(ha))
6210 return QLA_FUNCTION_FAILED;
6212 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
6214 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
6215 mcp->out_mb = MBX_0;
6216 mcp->in_mb = MBX_1|MBX_0;
6217 mcp->tov = MBX_TOV_SECONDS;
6218 mcp->flags = 0;
6219 rval = qla2x00_mailbox_command(vha, mcp);
6221 if (rval != QLA_SUCCESS) {
6222 ql_dbg(ql_dbg_mbx, vha, 0x1144,
6223 "Failed=%x mb[0]=%x mb[1]=%x.\n",
6224 rval, mcp->mb[0], mcp->mb[1]);
6225 qla2xxx_dump_fw(vha);
6226 } else {
6227 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
6230 return rval;
6234 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
6235 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
6237 int rval;
6238 mbx_cmd_t mc;
6239 mbx_cmd_t *mcp = &mc;
6240 uint8_t subcode = (uint8_t)options;
6241 struct qla_hw_data *ha = vha->hw;
6243 if (!IS_QLA8031(ha))
6244 return QLA_FUNCTION_FAILED;
6246 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6248 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6249 mcp->mb[1] = options;
6250 mcp->out_mb = MBX_1|MBX_0;
6251 if (subcode & BIT_2) {
6252 mcp->mb[2] = LSW(start_addr);
6253 mcp->mb[3] = MSW(start_addr);
6254 mcp->mb[4] = LSW(end_addr);
6255 mcp->mb[5] = MSW(end_addr);
6256 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6258 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6259 if (!(subcode & (BIT_2 | BIT_5)))
6260 mcp->in_mb |= MBX_4|MBX_3;
6261 mcp->tov = MBX_TOV_SECONDS;
6262 mcp->flags = 0;
6263 rval = qla2x00_mailbox_command(vha, mcp);
6265 if (rval != QLA_SUCCESS) {
6266 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6267 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6268 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6269 mcp->mb[4]);
6270 qla2xxx_dump_fw(vha);
6271 } else {
6272 if (subcode & BIT_5)
6273 *sector_size = mcp->mb[1];
6274 else if (subcode & (BIT_6 | BIT_7)) {
6275 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6276 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6277 } else if (subcode & (BIT_3 | BIT_4)) {
6278 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6279 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6281 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6284 return rval;
6288 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6289 uint32_t size)
6291 int rval;
6292 mbx_cmd_t mc;
6293 mbx_cmd_t *mcp = &mc;
6295 if (!IS_MCTP_CAPABLE(vha->hw))
6296 return QLA_FUNCTION_FAILED;
6298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6299 "Entered %s.\n", __func__);
6301 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6302 mcp->mb[1] = LSW(addr);
6303 mcp->mb[2] = MSW(req_dma);
6304 mcp->mb[3] = LSW(req_dma);
6305 mcp->mb[4] = MSW(size);
6306 mcp->mb[5] = LSW(size);
6307 mcp->mb[6] = MSW(MSD(req_dma));
6308 mcp->mb[7] = LSW(MSD(req_dma));
6309 mcp->mb[8] = MSW(addr);
6310 /* Setting RAM ID to valid */
6311 /* For MCTP RAM ID is 0x40 */
6312 mcp->mb[10] = BIT_7 | 0x40;
6314 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6315 MBX_0;
6317 mcp->in_mb = MBX_0;
6318 mcp->tov = MBX_TOV_SECONDS;
6319 mcp->flags = 0;
6320 rval = qla2x00_mailbox_command(vha, mcp);
6322 if (rval != QLA_SUCCESS) {
6323 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6324 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6325 } else {
6326 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6327 "Done %s.\n", __func__);
6330 return rval;
6334 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6335 void *dd_buf, uint size, uint options)
6337 int rval;
6338 mbx_cmd_t mc;
6339 mbx_cmd_t *mcp = &mc;
6340 dma_addr_t dd_dma;
6342 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
6343 !IS_QLA28XX(vha->hw))
6344 return QLA_FUNCTION_FAILED;
6346 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6347 "Entered %s.\n", __func__);
6349 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6350 dd_buf, size, DMA_FROM_DEVICE);
6351 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6352 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6353 return QLA_MEMORY_ALLOC_FAILED;
6356 memset(dd_buf, 0, size);
6358 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6359 mcp->mb[1] = options;
6360 mcp->mb[2] = MSW(LSD(dd_dma));
6361 mcp->mb[3] = LSW(LSD(dd_dma));
6362 mcp->mb[6] = MSW(MSD(dd_dma));
6363 mcp->mb[7] = LSW(MSD(dd_dma));
6364 mcp->mb[8] = size;
6365 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6366 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6367 mcp->buf_size = size;
6368 mcp->flags = MBX_DMA_IN;
6369 mcp->tov = MBX_TOV_SECONDS * 4;
6370 rval = qla2x00_mailbox_command(vha, mcp);
6372 if (rval != QLA_SUCCESS) {
6373 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6374 } else {
6375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6376 "Done %s.\n", __func__);
6379 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6380 size, DMA_FROM_DEVICE);
6382 return rval;
6385 static void qla2x00_async_mb_sp_done(srb_t *sp, int res)
6387 sp->u.iocb_cmd.u.mbx.rc = res;
6389 complete(&sp->u.iocb_cmd.u.mbx.comp);
6390 /* don't free sp here. Let the caller do the free */
6394 * This mailbox uses the iocb interface to send MB command.
6395 * This allows non-critial (non chip setup) command to go
6396 * out in parrallel.
6398 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6400 int rval = QLA_FUNCTION_FAILED;
6401 srb_t *sp;
6402 struct srb_iocb *c;
6404 if (!vha->hw->flags.fw_started)
6405 goto done;
6407 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6408 if (!sp)
6409 goto done;
6411 sp->type = SRB_MB_IOCB;
6412 sp->name = mb_to_str(mcp->mb[0]);
6414 c = &sp->u.iocb_cmd;
6415 c->timeout = qla2x00_async_iocb_timeout;
6416 init_completion(&c->u.mbx.comp);
6418 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6420 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6422 sp->done = qla2x00_async_mb_sp_done;
6424 rval = qla2x00_start_sp(sp);
6425 if (rval != QLA_SUCCESS) {
6426 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6427 "%s: %s Failed submission. %x.\n",
6428 __func__, sp->name, rval);
6429 goto done_free_sp;
6432 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6433 sp->name, sp->handle);
6435 wait_for_completion(&c->u.mbx.comp);
6436 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6438 rval = c->u.mbx.rc;
6439 switch (rval) {
6440 case QLA_FUNCTION_TIMEOUT:
6441 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6442 __func__, sp->name, rval);
6443 break;
6444 case QLA_SUCCESS:
6445 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6446 __func__, sp->name);
6447 break;
6448 default:
6449 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6450 __func__, sp->name, rval);
6451 break;
6454 done_free_sp:
6455 sp->free(sp);
6456 done:
6457 return rval;
6461 * qla24xx_gpdb_wait
6462 * NOTE: Do not call this routine from DPC thread
6464 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6466 int rval = QLA_FUNCTION_FAILED;
6467 dma_addr_t pd_dma;
6468 struct port_database_24xx *pd;
6469 struct qla_hw_data *ha = vha->hw;
6470 mbx_cmd_t mc;
6472 if (!vha->hw->flags.fw_started)
6473 goto done;
6475 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6476 if (pd == NULL) {
6477 ql_log(ql_log_warn, vha, 0xd047,
6478 "Failed to allocate port database structure.\n");
6479 goto done_free_sp;
6482 memset(&mc, 0, sizeof(mc));
6483 mc.mb[0] = MBC_GET_PORT_DATABASE;
6484 mc.mb[1] = fcport->loop_id;
6485 mc.mb[2] = MSW(pd_dma);
6486 mc.mb[3] = LSW(pd_dma);
6487 mc.mb[6] = MSW(MSD(pd_dma));
6488 mc.mb[7] = LSW(MSD(pd_dma));
6489 mc.mb[9] = vha->vp_idx;
6490 mc.mb[10] = opt;
6492 rval = qla24xx_send_mb_cmd(vha, &mc);
6493 if (rval != QLA_SUCCESS) {
6494 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6495 "%s: %8phC fail\n", __func__, fcport->port_name);
6496 goto done_free_sp;
6499 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6501 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6502 __func__, fcport->port_name);
6504 done_free_sp:
6505 if (pd)
6506 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6507 done:
6508 return rval;
6511 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6512 struct port_database_24xx *pd)
6514 int rval = QLA_SUCCESS;
6515 uint64_t zero = 0;
6516 u8 current_login_state, last_login_state;
6518 if (NVME_TARGET(vha->hw, fcport)) {
6519 current_login_state = pd->current_login_state >> 4;
6520 last_login_state = pd->last_login_state >> 4;
6521 } else {
6522 current_login_state = pd->current_login_state & 0xf;
6523 last_login_state = pd->last_login_state & 0xf;
6526 /* Check for logged in state. */
6527 if (current_login_state != PDS_PRLI_COMPLETE) {
6528 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6529 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6530 current_login_state, last_login_state, fcport->loop_id);
6531 rval = QLA_FUNCTION_FAILED;
6532 goto gpd_error_out;
6535 if (fcport->loop_id == FC_NO_LOOP_ID ||
6536 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6537 memcmp(fcport->port_name, pd->port_name, 8))) {
6538 /* We lost the device mid way. */
6539 rval = QLA_NOT_LOGGED_IN;
6540 goto gpd_error_out;
6543 /* Names are little-endian. */
6544 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6545 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6547 /* Get port_id of device. */
6548 fcport->d_id.b.domain = pd->port_id[0];
6549 fcport->d_id.b.area = pd->port_id[1];
6550 fcport->d_id.b.al_pa = pd->port_id[2];
6551 fcport->d_id.b.rsvd_1 = 0;
6553 if (NVME_TARGET(vha->hw, fcport)) {
6554 fcport->port_type = FCT_NVME;
6555 if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0)
6556 fcport->port_type |= FCT_NVME_INITIATOR;
6557 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6558 fcport->port_type |= FCT_NVME_TARGET;
6559 if ((pd->prli_svc_param_word_3[0] & BIT_3) == 0)
6560 fcport->port_type |= FCT_NVME_DISCOVERY;
6561 } else {
6562 /* If not target must be initiator or unknown type. */
6563 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6564 fcport->port_type = FCT_INITIATOR;
6565 else
6566 fcport->port_type = FCT_TARGET;
6568 /* Passback COS information. */
6569 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6570 FC_COS_CLASS2 : FC_COS_CLASS3;
6572 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6573 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6574 fcport->conf_compl_supported = 1;
6577 gpd_error_out:
6578 return rval;
6582 * qla24xx_gidlist__wait
6583 * NOTE: don't call this routine from DPC thread.
6585 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6586 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6588 int rval = QLA_FUNCTION_FAILED;
6589 mbx_cmd_t mc;
6591 if (!vha->hw->flags.fw_started)
6592 goto done;
6594 memset(&mc, 0, sizeof(mc));
6595 mc.mb[0] = MBC_GET_ID_LIST;
6596 mc.mb[2] = MSW(id_list_dma);
6597 mc.mb[3] = LSW(id_list_dma);
6598 mc.mb[6] = MSW(MSD(id_list_dma));
6599 mc.mb[7] = LSW(MSD(id_list_dma));
6600 mc.mb[8] = 0;
6601 mc.mb[9] = vha->vp_idx;
6603 rval = qla24xx_send_mb_cmd(vha, &mc);
6604 if (rval != QLA_SUCCESS) {
6605 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6606 "%s: fail\n", __func__);
6607 } else {
6608 *entries = mc.mb[1];
6609 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6610 "%s: done\n", __func__);
6612 done:
6613 return rval;
6616 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6618 int rval;
6619 mbx_cmd_t mc;
6620 mbx_cmd_t *mcp = &mc;
6622 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6623 "Entered %s\n", __func__);
6625 memset(mcp->mb, 0 , sizeof(mcp->mb));
6626 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6627 mcp->mb[1] = 1;
6628 mcp->mb[2] = value;
6629 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6630 mcp->in_mb = MBX_2 | MBX_0;
6631 mcp->tov = MBX_TOV_SECONDS;
6632 mcp->flags = 0;
6634 rval = qla2x00_mailbox_command(vha, mcp);
6636 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6637 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6639 return rval;
6642 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6644 int rval;
6645 mbx_cmd_t mc;
6646 mbx_cmd_t *mcp = &mc;
6648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6649 "Entered %s\n", __func__);
6651 memset(mcp->mb, 0, sizeof(mcp->mb));
6652 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6653 mcp->mb[1] = 0;
6654 mcp->out_mb = MBX_1 | MBX_0;
6655 mcp->in_mb = MBX_2 | MBX_0;
6656 mcp->tov = MBX_TOV_SECONDS;
6657 mcp->flags = 0;
6659 rval = qla2x00_mailbox_command(vha, mcp);
6660 if (rval == QLA_SUCCESS)
6661 *value = mc.mb[2];
6663 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6664 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6666 return rval;
6670 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6672 struct qla_hw_data *ha = vha->hw;
6673 uint16_t iter, addr, offset;
6674 dma_addr_t phys_addr;
6675 int rval, c;
6676 u8 *sfp_data;
6678 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6679 addr = 0xa0;
6680 phys_addr = ha->sfp_data_dma;
6681 sfp_data = ha->sfp_data;
6682 offset = c = 0;
6684 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6685 if (iter == 4) {
6686 /* Skip to next device address. */
6687 addr = 0xa2;
6688 offset = 0;
6691 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6692 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6693 if (rval != QLA_SUCCESS) {
6694 ql_log(ql_log_warn, vha, 0x706d,
6695 "Unable to read SFP data (%x/%x/%x).\n", rval,
6696 addr, offset);
6698 return rval;
6701 if (buf && (c < count)) {
6702 u16 sz;
6704 if ((count - c) >= SFP_BLOCK_SIZE)
6705 sz = SFP_BLOCK_SIZE;
6706 else
6707 sz = count - c;
6709 memcpy(buf, sfp_data, sz);
6710 buf += SFP_BLOCK_SIZE;
6711 c += sz;
6713 phys_addr += SFP_BLOCK_SIZE;
6714 sfp_data += SFP_BLOCK_SIZE;
6715 offset += SFP_BLOCK_SIZE;
6718 return rval;
6721 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6722 uint16_t *out_mb, int out_mb_sz)
6724 int rval = QLA_FUNCTION_FAILED;
6725 mbx_cmd_t mc;
6727 if (!vha->hw->flags.fw_started)
6728 goto done;
6730 memset(&mc, 0, sizeof(mc));
6731 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6733 rval = qla24xx_send_mb_cmd(vha, &mc);
6734 if (rval != QLA_SUCCESS) {
6735 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6736 "%s: fail\n", __func__);
6737 } else {
6738 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6739 memcpy(out_mb, mc.mb, out_mb_sz);
6740 else
6741 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6743 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6744 "%s: done\n", __func__);
6746 done:
6747 return rval;
6750 int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts,
6751 uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr,
6752 uint32_t sfub_len)
6754 int rval;
6755 mbx_cmd_t mc;
6756 mbx_cmd_t *mcp = &mc;
6758 mcp->mb[0] = MBC_SECURE_FLASH_UPDATE;
6759 mcp->mb[1] = opts;
6760 mcp->mb[2] = region;
6761 mcp->mb[3] = MSW(len);
6762 mcp->mb[4] = LSW(len);
6763 mcp->mb[5] = MSW(sfub_dma_addr);
6764 mcp->mb[6] = LSW(sfub_dma_addr);
6765 mcp->mb[7] = MSW(MSD(sfub_dma_addr));
6766 mcp->mb[8] = LSW(MSD(sfub_dma_addr));
6767 mcp->mb[9] = sfub_len;
6768 mcp->out_mb =
6769 MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6770 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6771 mcp->tov = MBX_TOV_SECONDS;
6772 mcp->flags = 0;
6773 rval = qla2x00_mailbox_command(vha, mcp);
6775 if (rval != QLA_SUCCESS) {
6776 ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x",
6777 __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1],
6778 mcp->mb[2]);
6781 return rval;
6784 int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6785 uint32_t data)
6787 int rval;
6788 mbx_cmd_t mc;
6789 mbx_cmd_t *mcp = &mc;
6791 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6792 "Entered %s.\n", __func__);
6794 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
6795 mcp->mb[1] = LSW(addr);
6796 mcp->mb[2] = MSW(addr);
6797 mcp->mb[3] = LSW(data);
6798 mcp->mb[4] = MSW(data);
6799 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6800 mcp->in_mb = MBX_1|MBX_0;
6801 mcp->tov = MBX_TOV_SECONDS;
6802 mcp->flags = 0;
6803 rval = qla2x00_mailbox_command(vha, mcp);
6805 if (rval != QLA_SUCCESS) {
6806 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6807 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6808 } else {
6809 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6810 "Done %s.\n", __func__);
6813 return rval;
6816 int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr,
6817 uint32_t *data)
6819 int rval;
6820 mbx_cmd_t mc;
6821 mbx_cmd_t *mcp = &mc;
6823 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
6824 "Entered %s.\n", __func__);
6826 mcp->mb[0] = MBC_READ_REMOTE_REG;
6827 mcp->mb[1] = LSW(addr);
6828 mcp->mb[2] = MSW(addr);
6829 mcp->out_mb = MBX_2|MBX_1|MBX_0;
6830 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
6831 mcp->tov = MBX_TOV_SECONDS;
6832 mcp->flags = 0;
6833 rval = qla2x00_mailbox_command(vha, mcp);
6835 *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]);
6837 if (rval != QLA_SUCCESS) {
6838 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
6839 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6840 } else {
6841 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
6842 "Done %s.\n", __func__);
6845 return rval;
6849 ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led)
6851 struct qla_hw_data *ha = vha->hw;
6852 mbx_cmd_t mc;
6853 mbx_cmd_t *mcp = &mc;
6854 int rval;
6856 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6857 return QLA_FUNCTION_FAILED;
6859 ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n",
6860 __func__, options);
6862 mcp->mb[0] = MBC_SET_GET_FC_LED_CONFIG;
6863 mcp->mb[1] = options;
6864 mcp->out_mb = MBX_1|MBX_0;
6865 mcp->in_mb = MBX_1|MBX_0;
6866 if (options & BIT_0) {
6867 if (options & BIT_1) {
6868 mcp->mb[2] = led[2];
6869 mcp->out_mb |= MBX_2;
6871 if (options & BIT_2) {
6872 mcp->mb[3] = led[0];
6873 mcp->out_mb |= MBX_3;
6875 if (options & BIT_3) {
6876 mcp->mb[4] = led[1];
6877 mcp->out_mb |= MBX_4;
6879 } else {
6880 mcp->in_mb |= MBX_4|MBX_3|MBX_2;
6882 mcp->tov = MBX_TOV_SECONDS;
6883 mcp->flags = 0;
6884 rval = qla2x00_mailbox_command(vha, mcp);
6885 if (rval) {
6886 ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x)\n",
6887 __func__, rval, mcp->mb[0], mcp->mb[1]);
6888 return rval;
6891 if (options & BIT_0) {
6892 ha->beacon_blink_led = 0;
6893 ql_dbg(ql_dbg_mbx, vha, 0x7072, "Done %s\n", __func__);
6894 } else {
6895 led[2] = mcp->mb[2];
6896 led[0] = mcp->mb[3];
6897 led[1] = mcp->mb[4];
6898 ql_dbg(ql_dbg_mbx, vha, 0x7073, "Done %s (led=%x,%x,%x)\n",
6899 __func__, led[0], led[1], led[2]);
6902 return rval;