drivers/scsi/qla2xxx/qla_mbx.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
23 static const char *mb_to_str(uint16_t cmd)
25 int i;
26 struct mb_cmd_name *e;
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 e = mb_str + i;
30 if (cmd == e->cmd)
31 return e->str;
33 return "unknown";
36 static struct rom_cmd {
37 uint16_t cmd;
38 } rom_cmds[] = {
39 { MBC_LOAD_RAM },
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP },
62 { MBC_GET_RNID_PARAMS },
63 { MBC_GET_SET_ZIO_THRESHOLD },
66 static int is_rom_cmd(uint16_t cmd)
68 int i;
69 struct rom_cmd *wc;
71 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
72 wc = rom_cmds + i;
73 if (wc->cmd == cmd)
74 return 1;
77 return 0;
81 * qla2x00_mailbox_command
82 * Issues a mailbox command and waits for completion.
84 * Input:
85 * ha = adapter block pointer.
86 * mcp = driver internal mbx struct pointer.
88 * Output:
89 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
91 * Returns:
92 * 0 : QLA_SUCCESS = cmd performed successfully
93 * 1 : QLA_FUNCTION_FAILED (error encountered)
94 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
96 * Context:
97 * Kernel context.
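 *
 * Typical caller pattern (illustrative sketch only; it mirrors the
 * wrapper routines below such as qla2x00_load_ram() and
 * qla2x00_get_fw_version()):
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
 *	mcp->out_mb = MBX_0;			(registers to write)
 *	mcp->in_mb = MBX_1|MBX_0;		(registers to read back)
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *
 * The requested in_mb registers are copied back into mcp->mb[], and
 * mcp->mb[0] carries the firmware completion status
 * (MBS_COMMAND_COMPLETE on success).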
99 static int
100 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
102 int rval, i;
103 unsigned long flags = 0;
104 device_reg_t *reg;
105 uint8_t abort_active;
106 uint8_t io_lock_on;
107 uint16_t command = 0;
108 uint16_t *iptr;
109 uint16_t __iomem *optr;
110 uint32_t cnt;
111 uint32_t mboxes;
112 unsigned long wait_time;
113 struct qla_hw_data *ha = vha->hw;
114 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 u32 chip_reset;
118 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
120 if (ha->pdev->error_state > pci_channel_io_frozen) {
121 ql_log(ql_log_warn, vha, 0x1001,
122 "error_state is greater than pci_channel_io_frozen, "
123 "exiting.\n");
124 return QLA_FUNCTION_TIMEOUT;
127 if (vha->device_flags & DFLG_DEV_FAILED) {
128 ql_log(ql_log_warn, vha, 0x1002,
129 "Device in failed state, exiting.\n");
130 return QLA_FUNCTION_TIMEOUT;
133 /* if PCI error, then avoid mbx processing.*/
134 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
135 test_bit(UNLOADING, &base_vha->dpc_flags)) {
136 ql_log(ql_log_warn, vha, 0xd04e,
137 "PCI error, exiting.\n");
138 return QLA_FUNCTION_TIMEOUT;
141 reg = ha->iobase;
142 io_lock_on = base_vha->flags.init_done;
144 rval = QLA_SUCCESS;
145 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
146 chip_reset = ha->chip_reset;
148 if (ha->flags.pci_channel_io_perm_failure) {
149 ql_log(ql_log_warn, vha, 0x1003,
150 "Perm failure on EEH timeout MBX, exiting.\n");
151 return QLA_FUNCTION_TIMEOUT;
154 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
155 /* Setting Link-Down error */
156 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
157 ql_log(ql_log_warn, vha, 0x1004,
158 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
159 return QLA_FUNCTION_TIMEOUT;
162 /* check if ISP abort is active and return cmd with timeout */
163 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
164 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
165 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
166 !is_rom_cmd(mcp->mb[0])) {
167 ql_log(ql_log_info, vha, 0x1005,
168 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
169 mcp->mb[0]);
170 return QLA_FUNCTION_TIMEOUT;
173 atomic_inc(&ha->num_pend_mbx_stage1);
175 * Wait for active mailbox commands to finish by waiting at most tov
176 * seconds. This is to serialize actual issuing of mailbox cmds during
177 * non-ISP-abort time.
179 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
180 /* Timeout occurred. Return error. */
181 ql_log(ql_log_warn, vha, 0xd035,
182 "Cmd access timeout, cmd=0x%x, Exiting.\n",
183 mcp->mb[0]);
184 atomic_dec(&ha->num_pend_mbx_stage1);
185 return QLA_FUNCTION_TIMEOUT;
187 atomic_dec(&ha->num_pend_mbx_stage1);
188 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
189 rval = QLA_ABORTED;
190 goto premature_exit;
194 /* Save mailbox command for debug */
195 ha->mcp = mcp;
197 ql_dbg(ql_dbg_mbx, vha, 0x1006,
198 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
200 spin_lock_irqsave(&ha->hardware_lock, flags);
202 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
203 ha->flags.mbox_busy) {
204 rval = QLA_ABORTED;
205 spin_unlock_irqrestore(&ha->hardware_lock, flags);
206 goto premature_exit;
208 ha->flags.mbox_busy = 1;
210 /* Load mailbox registers. */
211 if (IS_P3P_TYPE(ha))
212 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
213 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
214 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
215 else
216 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
218 iptr = mcp->mb;
219 command = mcp->mb[0];
220 mboxes = mcp->out_mb;
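/*
 * mcp->out_mb is a bitmask: bit N set means mailbox register N carries
 * an outbound value.  The loop below shifts the mask right one bit per
 * register and only writes the registers whose bit is set.
 */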
222 ql_dbg(ql_dbg_mbx, vha, 0x1111,
223 "Mailbox registers (OUT):\n");
224 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
225 if (IS_QLA2200(ha) && cnt == 8)
226 optr =
227 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
228 if (mboxes & BIT_0) {
229 ql_dbg(ql_dbg_mbx, vha, 0x1112,
230 "mbox[%d]<-0x%04x\n", cnt, *iptr);
231 WRT_REG_WORD(optr, *iptr);
234 mboxes >>= 1;
235 optr++;
236 iptr++;
239 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
240 "I/O Address = %p.\n", optr);
242 /* Issue set host interrupt command to send cmd out. */
243 ha->flags.mbox_int = 0;
244 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
246 /* Unlock mbx registers and wait for interrupt */
247 ql_dbg(ql_dbg_mbx, vha, 0x100f,
248 "Going to unlock irq & waiting for interrupts. "
249 "jiffies=%lx.\n", jiffies);
251 /* Wait for mbx cmd completion until timeout */
252 atomic_inc(&ha->num_pend_mbx_stage2);
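/*
 * Two completion paths follow.  When no ISP abort is active and
 * initialization is done (or the HBA never uses polling), arm
 * MBX_INTR_WAIT and sleep on mbx_intr_comp until the interrupt handler
 * completes the command.  Otherwise set the host interrupt and poll the
 * response queue until mbox_int is set or the timeout expires.
 */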
253 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
254 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
256 if (IS_P3P_TYPE(ha)) {
257 if (RD_REG_DWORD(&reg->isp82.hint) &
258 HINT_MBX_INT_PENDING) {
259 ha->flags.mbox_busy = 0;
260 spin_unlock_irqrestore(&ha->hardware_lock,
261 flags);
263 atomic_dec(&ha->num_pend_mbx_stage2);
264 ql_dbg(ql_dbg_mbx, vha, 0x1010,
265 "Pending mailbox timeout, exiting.\n");
266 rval = QLA_FUNCTION_TIMEOUT;
267 goto premature_exit;
269 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
270 } else if (IS_FWI2_CAPABLE(ha))
271 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
272 else
273 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
274 spin_unlock_irqrestore(&ha->hardware_lock, flags);
276 wait_time = jiffies;
277 atomic_inc(&ha->num_pend_mbx_stage3);
278 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
279 mcp->tov * HZ)) {
280 if (chip_reset != ha->chip_reset) {
281 spin_lock_irqsave(&ha->hardware_lock, flags);
282 ha->flags.mbox_busy = 0;
283 spin_unlock_irqrestore(&ha->hardware_lock,
284 flags);
285 atomic_dec(&ha->num_pend_mbx_stage2);
286 atomic_dec(&ha->num_pend_mbx_stage3);
287 rval = QLA_ABORTED;
288 goto premature_exit;
290 ql_dbg(ql_dbg_mbx, vha, 0x117a,
291 "cmd=%x Timeout.\n", command);
292 spin_lock_irqsave(&ha->hardware_lock, flags);
293 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
294 spin_unlock_irqrestore(&ha->hardware_lock, flags);
296 } else if (ha->flags.purge_mbox ||
297 chip_reset != ha->chip_reset) {
298 spin_lock_irqsave(&ha->hardware_lock, flags);
299 ha->flags.mbox_busy = 0;
300 spin_unlock_irqrestore(&ha->hardware_lock, flags);
301 atomic_dec(&ha->num_pend_mbx_stage2);
302 atomic_dec(&ha->num_pend_mbx_stage3);
303 rval = QLA_ABORTED;
304 goto premature_exit;
306 atomic_dec(&ha->num_pend_mbx_stage3);
308 if (time_after(jiffies, wait_time + 5 * HZ))
309 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
310 command, jiffies_to_msecs(jiffies - wait_time));
311 } else {
312 ql_dbg(ql_dbg_mbx, vha, 0x1011,
313 "Cmd=%x Polling Mode.\n", command);
315 if (IS_P3P_TYPE(ha)) {
316 if (RD_REG_DWORD(&reg->isp82.hint) &
317 HINT_MBX_INT_PENDING) {
318 ha->flags.mbox_busy = 0;
319 spin_unlock_irqrestore(&ha->hardware_lock,
320 flags);
321 atomic_dec(&ha->num_pend_mbx_stage2);
322 ql_dbg(ql_dbg_mbx, vha, 0x1012,
323 "Pending mailbox timeout, exiting.\n");
324 rval = QLA_FUNCTION_TIMEOUT;
325 goto premature_exit;
327 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
328 } else if (IS_FWI2_CAPABLE(ha))
329 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
330 else
331 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
332 spin_unlock_irqrestore(&ha->hardware_lock, flags);
334 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
335 while (!ha->flags.mbox_int) {
336 if (ha->flags.purge_mbox ||
337 chip_reset != ha->chip_reset) {
338 spin_lock_irqsave(&ha->hardware_lock, flags);
339 ha->flags.mbox_busy = 0;
340 spin_unlock_irqrestore(&ha->hardware_lock,
341 flags);
342 atomic_dec(&ha->num_pend_mbx_stage2);
343 rval = QLA_ABORTED;
344 goto premature_exit;
347 if (time_after(jiffies, wait_time))
348 break;
351 * Check if it's UNLOADING, because we cannot poll in
352 * this case; otherwise a NULL pointer dereference
353 * is triggered.
355 if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
356 return QLA_FUNCTION_TIMEOUT;
358 /* Check for pending interrupts. */
359 qla2x00_poll(ha->rsp_q_map[0]);
361 if (!ha->flags.mbox_int &&
362 !(IS_QLA2200(ha) &&
363 command == MBC_LOAD_RISC_RAM_EXTENDED))
364 msleep(10);
365 } /* while */
366 ql_dbg(ql_dbg_mbx, vha, 0x1013,
367 "Waited %d sec.\n",
368 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
370 atomic_dec(&ha->num_pend_mbx_stage2);
372 /* Check whether we timed out */
373 if (ha->flags.mbox_int) {
374 uint16_t *iptr2;
376 ql_dbg(ql_dbg_mbx, vha, 0x1014,
377 "Cmd=%x completed.\n", command);
379 /* Got interrupt. Clear the flag. */
380 ha->flags.mbox_int = 0;
381 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
383 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
384 spin_lock_irqsave(&ha->hardware_lock, flags);
385 ha->flags.mbox_busy = 0;
386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
388 /* Setting Link-Down error */
389 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
390 ha->mcp = NULL;
391 rval = QLA_FUNCTION_FAILED;
392 ql_log(ql_log_warn, vha, 0xd048,
393 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
394 goto premature_exit;
397 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
398 rval = QLA_FUNCTION_FAILED;
400 /* Load return mailbox registers. */
401 iptr2 = mcp->mb;
402 iptr = (uint16_t *)&ha->mailbox_out[0];
403 mboxes = mcp->in_mb;
405 ql_dbg(ql_dbg_mbx, vha, 0x1113,
406 "Mailbox registers (IN):\n");
407 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
408 if (mboxes & BIT_0) {
409 *iptr2 = *iptr;
410 ql_dbg(ql_dbg_mbx, vha, 0x1114,
411 "mbox[%d]->0x%04x\n", cnt, *iptr2);
414 mboxes >>= 1;
415 iptr2++;
416 iptr++;
418 } else {
420 uint16_t mb[8];
421 uint32_t ictrl, host_status, hccr;
422 uint16_t w;
424 if (IS_FWI2_CAPABLE(ha)) {
425 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
426 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
427 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
428 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
429 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
430 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
431 host_status = RD_REG_DWORD(&reg->isp24.host_status);
432 hccr = RD_REG_DWORD(&reg->isp24.hccr);
434 ql_log(ql_log_warn, vha, 0xd04c,
435 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
436 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
437 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
438 mb[7], host_status, hccr);
440 } else {
441 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
442 ictrl = RD_REG_WORD(&reg->isp.ictrl);
443 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
444 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
445 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
447 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
449 /* Capture FW dump only if PCI device is active */
450 if (!pci_channel_offline(vha->hw->pdev)) {
451 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
452 if (w == 0xffff || ictrl == 0xffffffff ||
453 (chip_reset != ha->chip_reset)) {
454 /* This is a special case: if a driver unload
455 * is in progress and the PCI device has gone
456 * into a bad state due to a PCI error condition,
457 * only the PCI ERR flag would be set.
458 * We do a premature exit for that case.
460 spin_lock_irqsave(&ha->hardware_lock, flags);
461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(&ha->hardware_lock,
463 flags);
464 rval = QLA_FUNCTION_TIMEOUT;
465 goto premature_exit;
468 /* Attempt to capture a firmware dump for further
469 * analysis of the current firmware state. We do not
470 * need to do this if we are intentionally generating
471 * a dump.
473 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
474 ha->isp_ops->fw_dump(vha, 0);
475 rval = QLA_FUNCTION_TIMEOUT;
478 spin_lock_irqsave(&ha->hardware_lock, flags);
479 ha->flags.mbox_busy = 0;
480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
482 /* Clean up */
483 ha->mcp = NULL;
485 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
486 ql_dbg(ql_dbg_mbx, vha, 0x101a,
487 "Checking for additional resp interrupt.\n");
489 /* polling mode for non isp_abort commands. */
490 qla2x00_poll(ha->rsp_q_map[0]);
493 if (rval == QLA_FUNCTION_TIMEOUT &&
494 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
495 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
496 ha->flags.eeh_busy) {
497 /* not in dpc. schedule it for dpc to take over. */
498 ql_dbg(ql_dbg_mbx, vha, 0x101b,
499 "Timeout, schedule isp_abort_needed.\n");
501 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
502 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
503 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
504 if (IS_QLA82XX(ha)) {
505 ql_dbg(ql_dbg_mbx, vha, 0x112a,
506 "disabling pause transmit on port "
507 "0 & 1.\n");
508 qla82xx_wr_32(ha,
509 QLA82XX_CRB_NIU + 0x98,
510 CRB_NIU_XG_PAUSE_CTL_P0|
511 CRB_NIU_XG_PAUSE_CTL_P1);
513 ql_log(ql_log_info, base_vha, 0x101c,
514 "Mailbox cmd timeout occurred, cmd=0x%x, "
515 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
516 "abort.\n", command, mcp->mb[0],
517 ha->flags.eeh_busy);
518 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
519 qla2xxx_wake_dpc(vha);
521 } else if (current == ha->dpc_thread) {
522 /* call abort directly since we are in the DPC thread */
523 ql_dbg(ql_dbg_mbx, vha, 0x101d,
524 "Timeout, calling abort_isp.\n");
526 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
527 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
528 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
529 if (IS_QLA82XX(ha)) {
530 ql_dbg(ql_dbg_mbx, vha, 0x112b,
531 "disabling pause transmit on port "
532 "0 & 1.\n");
533 qla82xx_wr_32(ha,
534 QLA82XX_CRB_NIU + 0x98,
535 CRB_NIU_XG_PAUSE_CTL_P0|
536 CRB_NIU_XG_PAUSE_CTL_P1);
538 ql_log(ql_log_info, base_vha, 0x101e,
539 "Mailbox cmd timeout occurred, cmd=0x%x, "
540 "mb[0]=0x%x. Scheduling ISP abort ",
541 command, mcp->mb[0]);
542 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
543 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
544 /* Allow next mbx cmd to come in. */
545 complete(&ha->mbx_cmd_comp);
546 if (ha->isp_ops->abort_isp(vha)) {
547 /* Failed. retry later. */
548 set_bit(ISP_ABORT_NEEDED,
549 &vha->dpc_flags);
551 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
552 ql_dbg(ql_dbg_mbx, vha, 0x101f,
553 "Finished abort_isp.\n");
554 goto mbx_done;
559 premature_exit:
560 /* Allow next mbx cmd to come in. */
561 complete(&ha->mbx_cmd_comp);
563 mbx_done:
564 if (rval == QLA_ABORTED) {
565 ql_log(ql_log_info, vha, 0xd035,
566 "Chip Reset in progress. Purging Mbox cmd=0x%x.\n",
567 mcp->mb[0]);
568 } else if (rval) {
569 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
570 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
571 dev_name(&ha->pdev->dev), 0x1020+0x800,
572 vha->host_no);
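/* Dump up to four of the expected inbound mailbox registers
 * to aid diagnosis of the failed command.
 */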
573 mboxes = mcp->in_mb;
574 cnt = 4;
575 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
576 if (mboxes & BIT_0) {
577 printk(" mb[%u]=%x", i, mcp->mb[i]);
578 cnt--;
580 pr_warn(" cmd=%x ****\n", command);
582 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
583 ql_dbg(ql_dbg_mbx, vha, 0x1198,
584 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
585 RD_REG_DWORD(&reg->isp24.host_status),
586 RD_REG_DWORD(&reg->isp24.ictrl),
587 RD_REG_DWORD(&reg->isp24.istatus));
588 } else {
589 ql_dbg(ql_dbg_mbx, vha, 0x1206,
590 "ctrl_status=%#x ictrl=%#x istatus=%#x\n",
591 RD_REG_WORD(&reg->isp.ctrl_status),
592 RD_REG_WORD(&reg->isp.ictrl),
593 RD_REG_WORD(&reg->isp.istatus));
595 } else {
596 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
599 return rval;
603 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
604 uint32_t risc_code_size)
606 int rval;
607 struct qla_hw_data *ha = vha->hw;
608 mbx_cmd_t mc;
609 mbx_cmd_t *mcp = &mc;
611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
612 "Entered %s.\n", __func__);
614 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
615 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
616 mcp->mb[8] = MSW(risc_addr);
617 mcp->out_mb = MBX_8|MBX_0;
618 } else {
619 mcp->mb[0] = MBC_LOAD_RISC_RAM;
620 mcp->out_mb = MBX_0;
622 mcp->mb[1] = LSW(risc_addr);
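/* The 64-bit DMA address of the source buffer is split across four
 * 16-bit mailbox registers: mb[3]/mb[2] carry the lower/upper halves
 * of the low dword, mb[7]/mb[6] the lower/upper halves of the high
 * dword.  The other DMA-carrying mailbox commands in this file follow
 * the same convention.
 */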
623 mcp->mb[2] = MSW(req_dma);
624 mcp->mb[3] = LSW(req_dma);
625 mcp->mb[6] = MSW(MSD(req_dma));
626 mcp->mb[7] = LSW(MSD(req_dma));
627 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
628 if (IS_FWI2_CAPABLE(ha)) {
629 mcp->mb[4] = MSW(risc_code_size);
630 mcp->mb[5] = LSW(risc_code_size);
631 mcp->out_mb |= MBX_5|MBX_4;
632 } else {
633 mcp->mb[4] = LSW(risc_code_size);
634 mcp->out_mb |= MBX_4;
637 mcp->in_mb = MBX_0;
638 mcp->tov = MBX_TOV_SECONDS;
639 mcp->flags = 0;
640 rval = qla2x00_mailbox_command(vha, mcp);
642 if (rval != QLA_SUCCESS) {
643 ql_dbg(ql_dbg_mbx, vha, 0x1023,
644 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
645 } else {
646 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
647 "Done %s.\n", __func__);
650 return rval;
653 #define EXTENDED_BB_CREDITS BIT_0
654 #define NVME_ENABLE_FLAG BIT_3
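/*
 * The helpers below build the mb[4] option bits handed to
 * MBC_EXECUTE_FIRMWARE: BIT_0 (EXTENDED_BB_CREDITS above) requests
 * extended BB credits and, on 83xx/27xx adapters, the long-range
 * distance field is OR-ed in from either the detected SFP
 * (qla25xx_set_sfp_lr_dist) or NVRAM (qla25xx_set_nvr_lr_dist).
 */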
655 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
657 uint16_t mb4 = BIT_0;
659 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
660 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
662 return mb4;
665 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
667 uint16_t mb4 = BIT_0;
669 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
670 struct nvram_81xx *nv = ha->nvram;
672 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
675 return mb4;
679 * qla2x00_execute_fw
680 * Start adapter firmware.
682 * Input:
683 * ha = adapter block pointer.
684 * TARGET_QUEUE_LOCK must be released.
685 * ADAPTER_STATE_LOCK must be released.
687 * Returns:
688 * qla2x00 local function return status code.
690 * Context:
691 * Kernel context.
694 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
696 int rval;
697 struct qla_hw_data *ha = vha->hw;
698 mbx_cmd_t mc;
699 mbx_cmd_t *mcp = &mc;
701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
702 "Entered %s.\n", __func__);
704 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
705 mcp->out_mb = MBX_0;
706 mcp->in_mb = MBX_0;
707 if (IS_FWI2_CAPABLE(ha)) {
708 mcp->mb[1] = MSW(risc_addr);
709 mcp->mb[2] = LSW(risc_addr);
710 mcp->mb[3] = 0;
711 mcp->mb[4] = 0;
712 ha->flags.using_lr_setting = 0;
713 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
714 IS_QLA27XX(ha)) {
715 if (ql2xautodetectsfp) {
716 if (ha->flags.detected_lr_sfp) {
717 mcp->mb[4] |=
718 qla25xx_set_sfp_lr_dist(ha);
719 ha->flags.using_lr_setting = 1;
721 } else {
722 struct nvram_81xx *nv = ha->nvram;
723 /* set LR distance if specified in nvram */
724 if (nv->enhanced_features &
725 NEF_LR_DIST_ENABLE) {
726 mcp->mb[4] |=
727 qla25xx_set_nvr_lr_dist(ha);
728 ha->flags.using_lr_setting = 1;
733 if (ql2xnvmeenable && IS_QLA27XX(ha))
734 mcp->mb[4] |= NVME_ENABLE_FLAG;
736 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
737 struct nvram_81xx *nv = ha->nvram;
738 /* set minimum speed if specified in nvram */
739 if (nv->min_link_speed >= 2 &&
740 nv->min_link_speed <= 5) {
741 mcp->mb[4] |= BIT_4;
742 mcp->mb[11] = nv->min_link_speed;
743 mcp->out_mb |= MBX_11;
744 mcp->in_mb |= BIT_5;
745 vha->min_link_speed_feat = nv->min_link_speed;
749 if (ha->flags.exlogins_enabled)
750 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
752 if (ha->flags.exchoffld_enabled)
753 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
755 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
756 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
757 } else {
758 mcp->mb[1] = LSW(risc_addr);
759 mcp->out_mb |= MBX_1;
760 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
761 mcp->mb[2] = 0;
762 mcp->out_mb |= MBX_2;
766 mcp->tov = MBX_TOV_SECONDS;
767 mcp->flags = 0;
768 rval = qla2x00_mailbox_command(vha, mcp);
770 if (rval != QLA_SUCCESS) {
771 ql_dbg(ql_dbg_mbx, vha, 0x1026,
772 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
773 } else {
774 if (IS_FWI2_CAPABLE(ha)) {
775 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
776 ql_dbg(ql_dbg_mbx, vha, 0x119a,
777 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
778 ql_dbg(ql_dbg_mbx, vha, 0x1027,
779 "exchanges=%x.\n", mcp->mb[1]);
780 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
781 ha->max_speed_sup = mcp->mb[2] & BIT_0;
782 ql_dbg(ql_dbg_mbx, vha, 0x119b,
783 "Maximum speed supported=%s.\n",
784 ha->max_speed_sup ? "32Gps" : "16Gps");
785 if (vha->min_link_speed_feat) {
786 ha->min_link_speed = mcp->mb[5];
787 ql_dbg(ql_dbg_mbx, vha, 0x119c,
788 "Minimum speed set=%s.\n",
789 mcp->mb[5] == 5 ? "32Gps" :
790 mcp->mb[5] == 4 ? "16Gps" :
791 mcp->mb[5] == 3 ? "8Gps" :
792 mcp->mb[5] == 2 ? "4Gps" :
793 "unknown");
797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
798 "Done.\n");
801 return rval;
805 * qla_get_exlogin_status
806 * Get extended login status
807 * uses the memory offload control/status Mailbox
809 * Input:
810 * ha: adapter state pointer.
811 * fwopt: firmware options
813 * Returns:
814 * qla2x00 local function status
816 * Context:
817 * Kernel context.
819 #define FETCH_XLOGINS_STAT 0x8
821 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
822 uint16_t *ex_logins_cnt)
824 int rval;
825 mbx_cmd_t mc;
826 mbx_cmd_t *mcp = &mc;
828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
829 "Entered %s\n", __func__);
831 memset(mcp->mb, 0 , sizeof(mcp->mb));
832 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
833 mcp->mb[1] = FETCH_XLOGINS_STAT;
834 mcp->out_mb = MBX_1|MBX_0;
835 mcp->in_mb = MBX_10|MBX_4|MBX_0;
836 mcp->tov = MBX_TOV_SECONDS;
837 mcp->flags = 0;
839 rval = qla2x00_mailbox_command(vha, mcp);
840 if (rval != QLA_SUCCESS) {
841 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
842 } else {
843 *buf_sz = mcp->mb[4];
844 *ex_logins_cnt = mcp->mb[10];
846 ql_log(ql_log_info, vha, 0x1190,
847 "buffer size 0x%x, exchange login count=%d\n",
848 mcp->mb[4], mcp->mb[10]);
850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
851 "Done %s.\n", __func__);
854 return rval;
858 * qla_set_exlogin_mem_cfg
859 * set extended login memory configuration
860 * Mbx needs to be issued before init_cb is set
862 * Input:
863 * ha: adapter state pointer.
864 * buffer: buffer pointer
865 * phys_addr: physical address of buffer
866 * size: size of buffer
867 * TARGET_QUEUE_LOCK must be released
868 * ADAPTER_STATE_LOCK must be released
870 * Returns:
871 * qla2x00 local function status code.
873 * Context:
874 * Kernel context.
876 #define CONFIG_XLOGINS_MEM 0x3
878 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
880 int rval;
881 mbx_cmd_t mc;
882 mbx_cmd_t *mcp = &mc;
883 struct qla_hw_data *ha = vha->hw;
885 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
886 "Entered %s.\n", __func__);
888 memset(mcp->mb, 0 , sizeof(mcp->mb));
889 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
890 mcp->mb[1] = CONFIG_XLOGINS_MEM;
891 mcp->mb[2] = MSW(phys_addr);
892 mcp->mb[3] = LSW(phys_addr);
893 mcp->mb[6] = MSW(MSD(phys_addr));
894 mcp->mb[7] = LSW(MSD(phys_addr));
895 mcp->mb[8] = MSW(ha->exlogin_size);
896 mcp->mb[9] = LSW(ha->exlogin_size);
897 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
898 mcp->in_mb = MBX_11|MBX_0;
899 mcp->tov = MBX_TOV_SECONDS;
900 mcp->flags = 0;
901 rval = qla2x00_mailbox_command(vha, mcp);
902 if (rval != QLA_SUCCESS) {
903 /*EMPTY*/
904 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
905 } else {
906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
907 "Done %s.\n", __func__);
910 return rval;
914 * qla_get_exchoffld_status
915 * Get exchange offload status
916 * uses the memory offload control/status Mailbox
918 * Input:
919 * ha: adapter state pointer.
920 * fwopt: firmware options
922 * Returns:
923 * qla2x00 local function status
925 * Context:
926 * Kernel context.
928 #define FETCH_XCHOFFLD_STAT 0x2
930 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
931 uint16_t *ex_logins_cnt)
933 int rval;
934 mbx_cmd_t mc;
935 mbx_cmd_t *mcp = &mc;
937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
938 "Entered %s\n", __func__);
940 memset(mcp->mb, 0 , sizeof(mcp->mb));
941 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
942 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
943 mcp->out_mb = MBX_1|MBX_0;
944 mcp->in_mb = MBX_10|MBX_4|MBX_0;
945 mcp->tov = MBX_TOV_SECONDS;
946 mcp->flags = 0;
948 rval = qla2x00_mailbox_command(vha, mcp);
949 if (rval != QLA_SUCCESS) {
950 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
951 } else {
952 *buf_sz = mcp->mb[4];
953 *ex_logins_cnt = mcp->mb[10];
955 ql_log(ql_log_info, vha, 0x118e,
956 "buffer size 0x%x, exchange offload count=%d\n",
957 mcp->mb[4], mcp->mb[10]);
959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
960 "Done %s.\n", __func__);
963 return rval;
967 * qla_set_exchoffld_mem_cfg
968 * Set exchange offload memory configuration
969 * Mbx needs to be issued before init_cb is set
971 * Input:
972 * ha: adapter state pointer.
973 * buffer: buffer pointer
974 * phys_addr: physical address of buffer
975 * size: size of buffer
976 * TARGET_QUEUE_LOCK must be released
977 * ADAPTER_STATE_LOCK must be released
979 * Returns:
980 * qla2x00 local function status code.
982 * Context:
983 * Kernel context.
985 #define CONFIG_XCHOFFLD_MEM 0x3
987 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
989 int rval;
990 mbx_cmd_t mc;
991 mbx_cmd_t *mcp = &mc;
992 struct qla_hw_data *ha = vha->hw;
994 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
995 "Entered %s.\n", __func__);
997 memset(mcp->mb, 0 , sizeof(mcp->mb));
998 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
999 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
1000 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1001 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1002 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1003 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1004 mcp->mb[8] = MSW(ha->exchoffld_size);
1005 mcp->mb[9] = LSW(ha->exchoffld_size);
1006 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1007 mcp->in_mb = MBX_11|MBX_0;
1008 mcp->tov = MBX_TOV_SECONDS;
1009 mcp->flags = 0;
1010 rval = qla2x00_mailbox_command(vha, mcp);
1011 if (rval != QLA_SUCCESS) {
1012 /*EMPTY*/
1013 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
1014 } else {
1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
1016 "Done %s.\n", __func__);
1019 return rval;
1023 * qla2x00_get_fw_version
1024 * Get firmware version.
1026 * Input:
1027 * ha: adapter state pointer.
1028 * major: pointer for major number.
1029 * minor: pointer for minor number.
1030 * subminor: pointer for subminor number.
1032 * Returns:
1033 * qla2x00 local function return status code.
1035 * Context:
1036 * Kernel context.
1039 qla2x00_get_fw_version(scsi_qla_host_t *vha)
1041 int rval;
1042 mbx_cmd_t mc;
1043 mbx_cmd_t *mcp = &mc;
1044 struct qla_hw_data *ha = vha->hw;
1046 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
1047 "Entered %s.\n", __func__);
1049 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
1050 mcp->out_mb = MBX_0;
1051 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
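/* Newer adapter families return additional version/capability words,
 * so widen the set of inbound mailbox registers accordingly.
 */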
1052 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1053 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
1054 if (IS_FWI2_CAPABLE(ha))
1055 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
1056 if (IS_QLA27XX(ha))
1057 mcp->in_mb |=
1058 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
1059 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
1061 mcp->flags = 0;
1062 mcp->tov = MBX_TOV_SECONDS;
1063 rval = qla2x00_mailbox_command(vha, mcp);
1064 if (rval != QLA_SUCCESS)
1065 goto failed;
1067 /* Return mailbox data. */
1068 ha->fw_major_version = mcp->mb[1];
1069 ha->fw_minor_version = mcp->mb[2];
1070 ha->fw_subminor_version = mcp->mb[3];
1071 ha->fw_attributes = mcp->mb[6];
1072 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
1073 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1074 else
1075 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1077 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1078 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1079 ha->mpi_version[1] = mcp->mb[11] >> 8;
1080 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1081 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1082 ha->phy_version[0] = mcp->mb[8] & 0xff;
1083 ha->phy_version[1] = mcp->mb[9] >> 8;
1084 ha->phy_version[2] = mcp->mb[9] & 0xff;
1087 if (IS_FWI2_CAPABLE(ha)) {
1088 ha->fw_attributes_h = mcp->mb[15];
1089 ha->fw_attributes_ext[0] = mcp->mb[16];
1090 ha->fw_attributes_ext[1] = mcp->mb[17];
1091 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1092 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1093 __func__, mcp->mb[15], mcp->mb[6]);
1094 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1095 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1096 __func__, mcp->mb[17], mcp->mb[16]);
1098 if (ha->fw_attributes_h & 0x4)
1099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1100 "%s: Firmware supports Extended Login 0x%x\n",
1101 __func__, ha->fw_attributes_h);
1103 if (ha->fw_attributes_h & 0x8)
1104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1105 "%s: Firmware supports Exchange Offload 0x%x\n",
1106 __func__, ha->fw_attributes_h);
1109 * FW supports NVMe and the driver load parameter requested NVMe.
1110 * BIT 26 of fw_attributes indicates NVMe support.
1112 if ((ha->fw_attributes_h &
1113 (FW_ATTR_H_NVME | FW_ATTR_H_NVME_UPDATED)) &&
1114 ql2xnvmeenable) {
1115 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1116 vha->flags.nvme_first_burst = 1;
1118 vha->flags.nvme_enabled = 1;
1119 ql_log(ql_log_info, vha, 0xd302,
1120 "%s: FC-NVMe is Enabled (0x%x)\n",
1121 __func__, ha->fw_attributes_h);
1125 if (IS_QLA27XX(ha)) {
1126 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1127 ha->mpi_version[1] = mcp->mb[11] >> 8;
1128 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1129 ha->pep_version[0] = mcp->mb[13] & 0xff;
1130 ha->pep_version[1] = mcp->mb[14] >> 8;
1131 ha->pep_version[2] = mcp->mb[14] & 0xff;
1132 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1133 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1134 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1135 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1138 failed:
1139 if (rval != QLA_SUCCESS) {
1140 /*EMPTY*/
1141 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1142 } else {
1143 /*EMPTY*/
1144 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1145 "Done %s.\n", __func__);
1147 return rval;
1151 * qla2x00_get_fw_options
1152 * Get firmware options.
1154 * Input:
1155 * ha = adapter block pointer.
1156 * fwopt = pointer for firmware options.
1158 * Returns:
1159 * qla2x00 local function return status code.
1161 * Context:
1162 * Kernel context.
1165 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1167 int rval;
1168 mbx_cmd_t mc;
1169 mbx_cmd_t *mcp = &mc;
1171 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1172 "Entered %s.\n", __func__);
1174 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1175 mcp->out_mb = MBX_0;
1176 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1177 mcp->tov = MBX_TOV_SECONDS;
1178 mcp->flags = 0;
1179 rval = qla2x00_mailbox_command(vha, mcp);
1181 if (rval != QLA_SUCCESS) {
1182 /*EMPTY*/
1183 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1184 } else {
1185 fwopts[0] = mcp->mb[0];
1186 fwopts[1] = mcp->mb[1];
1187 fwopts[2] = mcp->mb[2];
1188 fwopts[3] = mcp->mb[3];
1190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1191 "Done %s.\n", __func__);
1194 return rval;
1199 * qla2x00_set_fw_options
1200 * Set firmware options.
1202 * Input:
1203 * ha = adapter block pointer.
1204 * fwopt = pointer for firmware options.
1206 * Returns:
1207 * qla2x00 local function return status code.
1209 * Context:
1210 * Kernel context.
1213 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1215 int rval;
1216 mbx_cmd_t mc;
1217 mbx_cmd_t *mcp = &mc;
1219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1220 "Entered %s.\n", __func__);
1222 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1223 mcp->mb[1] = fwopts[1];
1224 mcp->mb[2] = fwopts[2];
1225 mcp->mb[3] = fwopts[3];
1226 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1227 mcp->in_mb = MBX_0;
1228 if (IS_FWI2_CAPABLE(vha->hw)) {
1229 mcp->in_mb |= MBX_1;
1230 mcp->mb[10] = fwopts[10];
1231 mcp->out_mb |= MBX_10;
1232 } else {
1233 mcp->mb[10] = fwopts[10];
1234 mcp->mb[11] = fwopts[11];
1235 mcp->mb[12] = 0; /* Undocumented, but used */
1236 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1238 mcp->tov = MBX_TOV_SECONDS;
1239 mcp->flags = 0;
1240 rval = qla2x00_mailbox_command(vha, mcp);
1242 fwopts[0] = mcp->mb[0];
1244 if (rval != QLA_SUCCESS) {
1245 /*EMPTY*/
1246 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1247 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1248 } else {
1249 /*EMPTY*/
1250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1251 "Done %s.\n", __func__);
1254 return rval;
1258 * qla2x00_mbx_reg_test
1259 * Mailbox register wrap test.
1261 * Input:
1262 * ha = adapter block pointer.
1263 * TARGET_QUEUE_LOCK must be released.
1264 * ADAPTER_STATE_LOCK must be released.
1266 * Returns:
1267 * qla2x00 local function return status code.
1269 * Context:
1270 * Kernel context.
1273 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1275 int rval;
1276 mbx_cmd_t mc;
1277 mbx_cmd_t *mcp = &mc;
1279 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1280 "Entered %s.\n", __func__);
1282 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
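/* Known test patterns; the firmware echoes them back and any mismatch
 * in the checks below fails the command.
 */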
1283 mcp->mb[1] = 0xAAAA;
1284 mcp->mb[2] = 0x5555;
1285 mcp->mb[3] = 0xAA55;
1286 mcp->mb[4] = 0x55AA;
1287 mcp->mb[5] = 0xA5A5;
1288 mcp->mb[6] = 0x5A5A;
1289 mcp->mb[7] = 0x2525;
1290 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1291 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1292 mcp->tov = MBX_TOV_SECONDS;
1293 mcp->flags = 0;
1294 rval = qla2x00_mailbox_command(vha, mcp);
1296 if (rval == QLA_SUCCESS) {
1297 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1298 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1299 rval = QLA_FUNCTION_FAILED;
1300 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1301 mcp->mb[7] != 0x2525)
1302 rval = QLA_FUNCTION_FAILED;
1305 if (rval != QLA_SUCCESS) {
1306 /*EMPTY*/
1307 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1308 } else {
1309 /*EMPTY*/
1310 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1311 "Done %s.\n", __func__);
1314 return rval;
1318 * qla2x00_verify_checksum
1319 * Verify firmware checksum.
1321 * Input:
1322 * ha = adapter block pointer.
1323 * TARGET_QUEUE_LOCK must be released.
1324 * ADAPTER_STATE_LOCK must be released.
1326 * Returns:
1327 * qla2x00 local function return status code.
1329 * Context:
1330 * Kernel context.
1333 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1335 int rval;
1336 mbx_cmd_t mc;
1337 mbx_cmd_t *mcp = &mc;
1339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1340 "Entered %s.\n", __func__);
1342 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1343 mcp->out_mb = MBX_0;
1344 mcp->in_mb = MBX_0;
1345 if (IS_FWI2_CAPABLE(vha->hw)) {
1346 mcp->mb[1] = MSW(risc_addr);
1347 mcp->mb[2] = LSW(risc_addr);
1348 mcp->out_mb |= MBX_2|MBX_1;
1349 mcp->in_mb |= MBX_2|MBX_1;
1350 } else {
1351 mcp->mb[1] = LSW(risc_addr);
1352 mcp->out_mb |= MBX_1;
1353 mcp->in_mb |= MBX_1;
1356 mcp->tov = MBX_TOV_SECONDS;
1357 mcp->flags = 0;
1358 rval = qla2x00_mailbox_command(vha, mcp);
1360 if (rval != QLA_SUCCESS) {
1361 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1362 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1363 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1364 } else {
1365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1366 "Done %s.\n", __func__);
1369 return rval;
1373 * qla2x00_issue_iocb
1374 * Issue IOCB using mailbox command
1376 * Input:
1377 * ha = adapter state pointer.
1378 * buffer = buffer pointer.
1379 * phys_addr = physical address of buffer.
1380 * size = size of buffer.
1381 * TARGET_QUEUE_LOCK must be released.
1382 * ADAPTER_STATE_LOCK must be released.
1384 * Returns:
1385 * qla2x00 local function return status code.
1387 * Context:
1388 * Kernel context.
1391 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1392 dma_addr_t phys_addr, size_t size, uint32_t tov)
1394 int rval;
1395 mbx_cmd_t mc;
1396 mbx_cmd_t *mcp = &mc;
1398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1399 "Entered %s.\n", __func__);
1401 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1402 mcp->mb[1] = 0;
1403 mcp->mb[2] = MSW(phys_addr);
1404 mcp->mb[3] = LSW(phys_addr);
1405 mcp->mb[6] = MSW(MSD(phys_addr));
1406 mcp->mb[7] = LSW(MSD(phys_addr));
1407 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1408 mcp->in_mb = MBX_2|MBX_0;
1409 mcp->tov = tov;
1410 mcp->flags = 0;
1411 rval = qla2x00_mailbox_command(vha, mcp);
1413 if (rval != QLA_SUCCESS) {
1414 /*EMPTY*/
1415 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1416 } else {
1417 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1419 /* Mask reserved bits. */
1420 sts_entry->entry_status &=
1421 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1422 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1423 "Done %s.\n", __func__);
1426 return rval;
1430 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1431 size_t size)
1433 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1434 MBX_TOV_SECONDS);
1438 * qla2x00_abort_command
1439 * Abort command aborts a specified IOCB.
1441 * Input:
1442 * ha = adapter block pointer.
1443 * sp = SRB structure pointer.
1445 * Returns:
1446 * qla2x00 local function return status code.
1448 * Context:
1449 * Kernel context.
1452 qla2x00_abort_command(srb_t *sp)
1454 unsigned long flags = 0;
1455 int rval;
1456 uint32_t handle = 0;
1457 mbx_cmd_t mc;
1458 mbx_cmd_t *mcp = &mc;
1459 fc_port_t *fcport = sp->fcport;
1460 scsi_qla_host_t *vha = fcport->vha;
1461 struct qla_hw_data *ha = vha->hw;
1462 struct req_que *req;
1463 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1465 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1466 "Entered %s.\n", __func__);
1468 if (vha->flags.qpairs_available && sp->qpair)
1469 req = sp->qpair->req;
1470 else
1471 req = vha->req;
1473 spin_lock_irqsave(&ha->hardware_lock, flags);
1474 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1475 if (req->outstanding_cmds[handle] == sp)
1476 break;
1478 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1480 if (handle == req->num_outstanding_cmds) {
1481 /* command not found */
1482 return QLA_FUNCTION_FAILED;
1485 mcp->mb[0] = MBC_ABORT_COMMAND;
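/* Adapters without extended IDs use an 8-bit loop ID carried in the
 * upper byte of mb[1]; with extended IDs the full loop ID goes in
 * mb[1] directly.
 */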
1486 if (HAS_EXTENDED_IDS(ha))
1487 mcp->mb[1] = fcport->loop_id;
1488 else
1489 mcp->mb[1] = fcport->loop_id << 8;
1490 mcp->mb[2] = (uint16_t)handle;
1491 mcp->mb[3] = (uint16_t)(handle >> 16);
1492 mcp->mb[6] = (uint16_t)cmd->device->lun;
1493 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1494 mcp->in_mb = MBX_0;
1495 mcp->tov = MBX_TOV_SECONDS;
1496 mcp->flags = 0;
1497 rval = qla2x00_mailbox_command(vha, mcp);
1499 if (rval != QLA_SUCCESS) {
1500 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1501 } else {
1502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1503 "Done %s.\n", __func__);
1506 return rval;
1510 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1512 int rval, rval2;
1513 mbx_cmd_t mc;
1514 mbx_cmd_t *mcp = &mc;
1515 scsi_qla_host_t *vha;
1517 vha = fcport->vha;
1519 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1520 "Entered %s.\n", __func__);
1522 mcp->mb[0] = MBC_ABORT_TARGET;
1523 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1524 if (HAS_EXTENDED_IDS(vha->hw)) {
1525 mcp->mb[1] = fcport->loop_id;
1526 mcp->mb[10] = 0;
1527 mcp->out_mb |= MBX_10;
1528 } else {
1529 mcp->mb[1] = fcport->loop_id << 8;
1531 mcp->mb[2] = vha->hw->loop_reset_delay;
1532 mcp->mb[9] = vha->vp_idx;
1534 mcp->in_mb = MBX_0;
1535 mcp->tov = MBX_TOV_SECONDS;
1536 mcp->flags = 0;
1537 rval = qla2x00_mailbox_command(vha, mcp);
1538 if (rval != QLA_SUCCESS) {
1539 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1540 "Failed=%x.\n", rval);
1543 /* Issue marker IOCB. */
1544 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, 0,
1545 MK_SYNC_ID);
1546 if (rval2 != QLA_SUCCESS) {
1547 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1548 "Failed to issue marker IOCB (%x).\n", rval2);
1549 } else {
1550 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1551 "Done %s.\n", __func__);
1554 return rval;
1558 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1560 int rval, rval2;
1561 mbx_cmd_t mc;
1562 mbx_cmd_t *mcp = &mc;
1563 scsi_qla_host_t *vha;
1565 vha = fcport->vha;
1567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1568 "Entered %s.\n", __func__);
1570 mcp->mb[0] = MBC_LUN_RESET;
1571 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1572 if (HAS_EXTENDED_IDS(vha->hw))
1573 mcp->mb[1] = fcport->loop_id;
1574 else
1575 mcp->mb[1] = fcport->loop_id << 8;
1576 mcp->mb[2] = (u32)l;
1577 mcp->mb[3] = 0;
1578 mcp->mb[9] = vha->vp_idx;
1580 mcp->in_mb = MBX_0;
1581 mcp->tov = MBX_TOV_SECONDS;
1582 mcp->flags = 0;
1583 rval = qla2x00_mailbox_command(vha, mcp);
1584 if (rval != QLA_SUCCESS) {
1585 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1588 /* Issue marker IOCB. */
1589 rval2 = qla2x00_marker(vha, vha->hw->base_qpair, fcport->loop_id, l,
1590 MK_SYNC_ID_LUN);
1591 if (rval2 != QLA_SUCCESS) {
1592 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1593 "Failed to issue marker IOCB (%x).\n", rval2);
1594 } else {
1595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1596 "Done %s.\n", __func__);
1599 return rval;
1603 * qla2x00_get_adapter_id
1604 * Get adapter ID and topology.
1606 * Input:
1607 * ha = adapter block pointer.
1608 * id = pointer for loop ID.
1609 * al_pa = pointer for AL_PA.
1610 * area = pointer for area.
1611 * domain = pointer for domain.
1612 * top = pointer for topology.
1613 * TARGET_QUEUE_LOCK must be released.
1614 * ADAPTER_STATE_LOCK must be released.
1616 * Returns:
1617 * qla2x00 local function return status code.
1619 * Context:
1620 * Kernel context.
1623 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1624 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1626 int rval;
1627 mbx_cmd_t mc;
1628 mbx_cmd_t *mcp = &mc;
1630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1631 "Entered %s.\n", __func__);
1633 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1634 mcp->mb[9] = vha->vp_idx;
1635 mcp->out_mb = MBX_9|MBX_0;
1636 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1637 if (IS_CNA_CAPABLE(vha->hw))
1638 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1639 if (IS_FWI2_CAPABLE(vha->hw))
1640 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1641 if (IS_QLA27XX(vha->hw))
1642 mcp->in_mb |= MBX_15;
1643 mcp->tov = MBX_TOV_SECONDS;
1644 mcp->flags = 0;
1645 rval = qla2x00_mailbox_command(vha, mcp);
1646 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1647 rval = QLA_COMMAND_ERROR;
1648 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1649 rval = QLA_INVALID_COMMAND;
1651 /* Return data. */
1652 *id = mcp->mb[1];
1653 *al_pa = LSB(mcp->mb[2]);
1654 *area = MSB(mcp->mb[2]);
1655 *domain = LSB(mcp->mb[3]);
1656 *top = mcp->mb[6];
1657 *sw_cap = mcp->mb[7];
1659 if (rval != QLA_SUCCESS) {
1660 /*EMPTY*/
1661 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1662 } else {
1663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1664 "Done %s.\n", __func__);
1666 if (IS_CNA_CAPABLE(vha->hw)) {
1667 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1668 vha->fcoe_fcf_idx = mcp->mb[10];
1669 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1670 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1671 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1672 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1673 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1674 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1676 /* If FA-WWN supported */
1677 if (IS_FAWWN_CAPABLE(vha->hw)) {
1678 if (mcp->mb[7] & BIT_14) {
1679 vha->port_name[0] = MSB(mcp->mb[16]);
1680 vha->port_name[1] = LSB(mcp->mb[16]);
1681 vha->port_name[2] = MSB(mcp->mb[17]);
1682 vha->port_name[3] = LSB(mcp->mb[17]);
1683 vha->port_name[4] = MSB(mcp->mb[18]);
1684 vha->port_name[5] = LSB(mcp->mb[18]);
1685 vha->port_name[6] = MSB(mcp->mb[19]);
1686 vha->port_name[7] = LSB(mcp->mb[19]);
1687 fc_host_port_name(vha->host) =
1688 wwn_to_u64(vha->port_name);
1689 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1690 "FA-WWN acquired %016llx\n",
1691 wwn_to_u64(vha->port_name));
1695 if (IS_QLA27XX(vha->hw))
1696 vha->bbcr = mcp->mb[15];
1699 return rval;
1703 * qla2x00_get_retry_cnt
1704 * Get current firmware login retry count and delay.
1706 * Input:
1707 * ha = adapter block pointer.
1708 * retry_cnt = pointer to login retry count.
1709 * tov = pointer to login timeout value.
1711 * Returns:
1712 * qla2x00 local function return status code.
1714 * Context:
1715 * Kernel context.
1718 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1719 uint16_t *r_a_tov)
1721 int rval;
1722 uint16_t ratov;
1723 mbx_cmd_t mc;
1724 mbx_cmd_t *mcp = &mc;
1726 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1727 "Entered %s.\n", __func__);
1729 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1730 mcp->out_mb = MBX_0;
1731 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1732 mcp->tov = MBX_TOV_SECONDS;
1733 mcp->flags = 0;
1734 rval = qla2x00_mailbox_command(vha, mcp);
1736 if (rval != QLA_SUCCESS) {
1737 /*EMPTY*/
1738 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1739 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1740 } else {
1741 /* Convert returned data and check our values. */
1742 *r_a_tov = mcp->mb[3] / 2;
1743 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1744 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1745 /* Update to the larger values */
1746 *retry_cnt = (uint8_t)mcp->mb[1];
1747 *tov = ratov;
1750 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1751 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1754 return rval;
1758 * qla2x00_init_firmware
1759 * Initialize adapter firmware.
1761 * Input:
1762 * ha = adapter block pointer.
1763 * dptr = Initialization control block pointer.
1764 * size = size of initialization control block.
1765 * TARGET_QUEUE_LOCK must be released.
1766 * ADAPTER_STATE_LOCK must be released.
1768 * Returns:
1769 * qla2x00 local function return status code.
1771 * Context:
1772 * Kernel context.
1775 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1777 int rval;
1778 mbx_cmd_t mc;
1779 mbx_cmd_t *mcp = &mc;
1780 struct qla_hw_data *ha = vha->hw;
1782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1783 "Entered %s.\n", __func__);
1785 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1786 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1787 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1789 if (ha->flags.npiv_supported)
1790 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1791 else
1792 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1794 mcp->mb[1] = 0;
1795 mcp->mb[2] = MSW(ha->init_cb_dma);
1796 mcp->mb[3] = LSW(ha->init_cb_dma);
1797 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1798 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1799 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1800 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1801 mcp->mb[1] = BIT_0;
1802 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1803 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1804 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1805 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1806 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1807 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1809 /* 1 and 2 should normally be captured. */
1810 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1811 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1812 /* mb3 is additional info about the installed SFP. */
1813 mcp->in_mb |= MBX_3;
1814 mcp->buf_size = size;
1815 mcp->flags = MBX_DMA_OUT;
1816 mcp->tov = MBX_TOV_SECONDS;
1817 rval = qla2x00_mailbox_command(vha, mcp);
1819 if (rval != QLA_SUCCESS) {
1820 /*EMPTY*/
1821 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1822 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1823 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1824 } else {
1825 if (IS_QLA27XX(ha)) {
1826 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1827 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1828 "Invalid SFP/Validation Failed\n");
1830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1831 "Done %s.\n", __func__);
1834 return rval;
1839 * qla2x00_get_port_database
1840 * Issue normal/enhanced get port database mailbox command
1841 * and copy device name as necessary.
1843 * Input:
1844 * ha = adapter state pointer.
1845 * dev = structure pointer.
1846 * opt = enhanced cmd option byte.
1848 * Returns:
1849 * qla2x00 local function return status code.
1851 * Context:
1852 * Kernel context.
1855 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1857 int rval;
1858 mbx_cmd_t mc;
1859 mbx_cmd_t *mcp = &mc;
1860 port_database_t *pd;
1861 struct port_database_24xx *pd24;
1862 dma_addr_t pd_dma;
1863 struct qla_hw_data *ha = vha->hw;
1865 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1866 "Entered %s.\n", __func__);
1868 pd24 = NULL;
1869 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1870 if (pd == NULL) {
1871 ql_log(ql_log_warn, vha, 0x1050,
1872 "Failed to allocate port database structure.\n");
1873 fcport->query = 0;
1874 return QLA_MEMORY_ALLOC_FAILED;
1877 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1878 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1879 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1880 mcp->mb[2] = MSW(pd_dma);
1881 mcp->mb[3] = LSW(pd_dma);
1882 mcp->mb[6] = MSW(MSD(pd_dma));
1883 mcp->mb[7] = LSW(MSD(pd_dma));
1884 mcp->mb[9] = vha->vp_idx;
1885 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1886 mcp->in_mb = MBX_0;
1887 if (IS_FWI2_CAPABLE(ha)) {
1888 mcp->mb[1] = fcport->loop_id;
1889 mcp->mb[10] = opt;
1890 mcp->out_mb |= MBX_10|MBX_1;
1891 mcp->in_mb |= MBX_1;
1892 } else if (HAS_EXTENDED_IDS(ha)) {
1893 mcp->mb[1] = fcport->loop_id;
1894 mcp->mb[10] = opt;
1895 mcp->out_mb |= MBX_10|MBX_1;
1896 } else {
1897 mcp->mb[1] = fcport->loop_id << 8 | opt;
1898 mcp->out_mb |= MBX_1;
1900 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1901 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1902 mcp->flags = MBX_DMA_IN;
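/* Give the firmware up to 2.5 x login_timeout to return the port
 * database before the mailbox command is timed out.
 */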
1903 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1904 rval = qla2x00_mailbox_command(vha, mcp);
1905 if (rval != QLA_SUCCESS)
1906 goto gpd_error_out;
1908 if (IS_FWI2_CAPABLE(ha)) {
1909 uint64_t zero = 0;
1910 u8 current_login_state, last_login_state;
1912 pd24 = (struct port_database_24xx *) pd;
1914 /* Check for logged in state. */
1915 if (fcport->fc4f_nvme) {
1916 current_login_state = pd24->current_login_state >> 4;
1917 last_login_state = pd24->last_login_state >> 4;
1918 } else {
1919 current_login_state = pd24->current_login_state & 0xf;
1920 last_login_state = pd24->last_login_state & 0xf;
1922 fcport->current_login_state = pd24->current_login_state;
1923 fcport->last_login_state = pd24->last_login_state;
1925 /* Check for logged in state. */
1926 if (current_login_state != PDS_PRLI_COMPLETE &&
1927 last_login_state != PDS_PRLI_COMPLETE) {
1928 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1929 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1930 current_login_state, last_login_state,
1931 fcport->loop_id);
1932 rval = QLA_FUNCTION_FAILED;
1934 if (!fcport->query)
1935 goto gpd_error_out;
1938 if (fcport->loop_id == FC_NO_LOOP_ID ||
1939 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1940 memcmp(fcport->port_name, pd24->port_name, 8))) {
1941 /* We lost the device mid way. */
1942 rval = QLA_NOT_LOGGED_IN;
1943 goto gpd_error_out;
1946 /* Names are little-endian. */
1947 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1948 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1950 /* Get port_id of device. */
1951 fcport->d_id.b.domain = pd24->port_id[0];
1952 fcport->d_id.b.area = pd24->port_id[1];
1953 fcport->d_id.b.al_pa = pd24->port_id[2];
1954 fcport->d_id.b.rsvd_1 = 0;
1956 /* If not target must be initiator or unknown type. */
1957 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1958 fcport->port_type = FCT_INITIATOR;
1959 else
1960 fcport->port_type = FCT_TARGET;
1962 /* Passback COS information. */
1963 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1964 FC_COS_CLASS2 : FC_COS_CLASS3;
1966 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1967 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1968 } else {
1969 uint64_t zero = 0;
1971 /* Check for logged in state. */
1972 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1973 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1974 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1975 "Unable to verify login-state (%x/%x) - "
1976 "portid=%02x%02x%02x.\n", pd->master_state,
1977 pd->slave_state, fcport->d_id.b.domain,
1978 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1979 rval = QLA_FUNCTION_FAILED;
1980 goto gpd_error_out;
1983 if (fcport->loop_id == FC_NO_LOOP_ID ||
1984 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1985 memcmp(fcport->port_name, pd->port_name, 8))) {
1986 /* We lost the device mid way. */
1987 rval = QLA_NOT_LOGGED_IN;
1988 goto gpd_error_out;
1991 /* Names are little-endian. */
1992 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1993 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1995 /* Get port_id of device. */
1996 fcport->d_id.b.domain = pd->port_id[0];
1997 fcport->d_id.b.area = pd->port_id[3];
1998 fcport->d_id.b.al_pa = pd->port_id[2];
1999 fcport->d_id.b.rsvd_1 = 0;
2001 /* If not target must be initiator or unknown type. */
2002 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
2003 fcport->port_type = FCT_INITIATOR;
2004 else
2005 fcport->port_type = FCT_TARGET;
2007 /* Passback COS information. */
2008 fcport->supported_classes = (pd->options & BIT_4) ?
2009 FC_COS_CLASS2: FC_COS_CLASS3;
2012 gpd_error_out:
2013 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2014 fcport->query = 0;
2016 if (rval != QLA_SUCCESS) {
2017 ql_dbg(ql_dbg_mbx, vha, 0x1052,
2018 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
2019 mcp->mb[0], mcp->mb[1]);
2020 } else {
2021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
2022 "Done %s.\n", __func__);
2025 return rval;
2029 * qla2x00_get_firmware_state
2030 * Get adapter firmware state.
2032 * Input:
2033 * ha = adapter block pointer.
2034 * dptr = pointer for firmware state.
2035 * TARGET_QUEUE_LOCK must be released.
2036 * ADAPTER_STATE_LOCK must be released.
2038 * Returns:
2039 * qla2x00 local function return status code.
2041 * Context:
2042 * Kernel context.
2045 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
2047 int rval;
2048 mbx_cmd_t mc;
2049 mbx_cmd_t *mcp = &mc;
2050 struct qla_hw_data *ha = vha->hw;
2052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
2053 "Entered %s.\n", __func__);
2055 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
2056 mcp->out_mb = MBX_0;
2057 if (IS_FWI2_CAPABLE(vha->hw))
2058 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2059 else
2060 mcp->in_mb = MBX_1|MBX_0;
2061 mcp->tov = MBX_TOV_SECONDS;
2062 mcp->flags = 0;
2063 rval = qla2x00_mailbox_command(vha, mcp);
2065 /* Return firmware states. */
2066 states[0] = mcp->mb[1];
2067 if (IS_FWI2_CAPABLE(vha->hw)) {
2068 states[1] = mcp->mb[2];
2069 states[2] = mcp->mb[3]; /* SFP info */
2070 states[3] = mcp->mb[4];
2071 states[4] = mcp->mb[5];
2072 states[5] = mcp->mb[6]; /* DPORT status */
2075 if (rval != QLA_SUCCESS) {
2076 /*EMPTY*/
2077 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
2078 } else {
2079 if (IS_QLA27XX(ha)) {
2080 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
2081 ql_dbg(ql_dbg_mbx, vha, 0x119e,
2082 "Invalid SFP/Validation Failed\n");
2084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2085 "Done %s.\n", __func__);
2088 return rval;
2092 * qla2x00_get_port_name
2093 * Issue get port name mailbox command.
2094 * Returned name is in big endian format.
2096 * Input:
2097 * ha = adapter block pointer.
2098 * loop_id = loop ID of device.
2099 * name = pointer for name.
2100 * TARGET_QUEUE_LOCK must be released.
2101 * ADAPTER_STATE_LOCK must be released.
2103 * Returns:
2104 * qla2x00 local function return status code.
2106 * Context:
2107 * Kernel context.
2110 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2111 uint8_t opt)
2113 int rval;
2114 mbx_cmd_t mc;
2115 mbx_cmd_t *mcp = &mc;
2117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2118 "Entered %s.\n", __func__);
2120 mcp->mb[0] = MBC_GET_PORT_NAME;
2121 mcp->mb[9] = vha->vp_idx;
2122 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2123 if (HAS_EXTENDED_IDS(vha->hw)) {
2124 mcp->mb[1] = loop_id;
2125 mcp->mb[10] = opt;
2126 mcp->out_mb |= MBX_10;
2127 } else {
2128 mcp->mb[1] = loop_id << 8 | opt;
2131 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2132 mcp->tov = MBX_TOV_SECONDS;
2133 mcp->flags = 0;
2134 rval = qla2x00_mailbox_command(vha, mcp);
2136 if (rval != QLA_SUCCESS) {
2137 /*EMPTY*/
2138 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2139 } else {
2140 if (name != NULL) {
2141 /* This function returns name in big endian. */
2142 name[0] = MSB(mcp->mb[2]);
2143 name[1] = LSB(mcp->mb[2]);
2144 name[2] = MSB(mcp->mb[3]);
2145 name[3] = LSB(mcp->mb[3]);
2146 name[4] = MSB(mcp->mb[6]);
2147 name[5] = LSB(mcp->mb[6]);
2148 name[6] = MSB(mcp->mb[7]);
2149 name[7] = LSB(mcp->mb[7]);
2152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2153 "Done %s.\n", __func__);
2156 return rval;
2160 * qla24xx_link_initialize
2161 * Issue link initialization mailbox command.
2163 * Input:
2164 * ha = adapter block pointer.
2165 * TARGET_QUEUE_LOCK must be released.
2166 * ADAPTER_STATE_LOCK must be released.
2168 * Returns:
2169 * qla2x00 local function return status code.
2171 * Context:
2172 * Kernel context.
2175 qla24xx_link_initialize(scsi_qla_host_t *vha)
2177 int rval;
2178 mbx_cmd_t mc;
2179 mbx_cmd_t *mcp = &mc;
2181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2182 "Entered %s.\n", __func__);
2184 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2185 return QLA_FUNCTION_FAILED;
2187 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2188 mcp->mb[1] = BIT_4;
2189 if (vha->hw->operating_mode == LOOP)
2190 mcp->mb[1] |= BIT_6;
2191 else
2192 mcp->mb[1] |= BIT_5;
2193 mcp->mb[2] = 0;
2194 mcp->mb[3] = 0;
2195 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2196 mcp->in_mb = MBX_0;
2197 mcp->tov = MBX_TOV_SECONDS;
2198 mcp->flags = 0;
2199 rval = qla2x00_mailbox_command(vha, mcp);
2201 if (rval != QLA_SUCCESS) {
2202 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2203 } else {
2204 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2205 "Done %s.\n", __func__);
2208 return rval;
2212 * qla2x00_lip_reset
2213 * Issue LIP reset mailbox command.
2215 * Input:
2216 * ha = adapter block pointer.
2217 * TARGET_QUEUE_LOCK must be released.
2218 * ADAPTER_STATE_LOCK must be released.
2220 * Returns:
2221 * qla2x00 local function return status code.
2223 * Context:
2224 * Kernel context.
2227 qla2x00_lip_reset(scsi_qla_host_t *vha)
2229 int rval;
2230 mbx_cmd_t mc;
2231 mbx_cmd_t *mcp = &mc;
2233 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2234 "Entered %s.\n", __func__);
2236 if (IS_CNA_CAPABLE(vha->hw)) {
2237 /* Logout across all FCFs. */
2238 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2239 mcp->mb[1] = BIT_1;
2240 mcp->mb[2] = 0;
2241 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2242 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2243 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2244 mcp->mb[1] = BIT_4;
2245 mcp->mb[2] = 0;
2246 mcp->mb[3] = vha->hw->loop_reset_delay;
2247 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2248 } else {
2249 mcp->mb[0] = MBC_LIP_RESET;
2250 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2251 if (HAS_EXTENDED_IDS(vha->hw)) {
2252 mcp->mb[1] = 0x00ff;
2253 mcp->mb[10] = 0;
2254 mcp->out_mb |= MBX_10;
2255 } else {
2256 mcp->mb[1] = 0xff00;
2258 mcp->mb[2] = vha->hw->loop_reset_delay;
2259 mcp->mb[3] = 0;
2261 mcp->in_mb = MBX_0;
2262 mcp->tov = MBX_TOV_SECONDS;
2263 mcp->flags = 0;
2264 rval = qla2x00_mailbox_command(vha, mcp);
2266 if (rval != QLA_SUCCESS) {
2267 /*EMPTY*/
2268 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2269 } else {
2270 /*EMPTY*/
2271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2272 "Done %s.\n", __func__);
2275 return rval;
2279 * qla2x00_send_sns
2280 * Send SNS command.
2282 * Input:
2283 * ha = adapter block pointer.
2284 * sns = pointer for command.
2285 * cmd_size = command size.
2286 * buf_size = response/command size.
2287 * TARGET_QUEUE_LOCK must be released.
2288 * ADAPTER_STATE_LOCK must be released.
2290 * Returns:
2291 * qla2x00 local function return status code.
2293 * Context:
2294 * Kernel context.
2297 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2298 uint16_t cmd_size, size_t buf_size)
2300 int rval;
2301 mbx_cmd_t mc;
2302 mbx_cmd_t *mcp = &mc;
2304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2305 "Entered %s.\n", __func__);
2307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2308 "Retry cnt=%d ratov=%d total tov=%d.\n",
2309 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2311 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2312 mcp->mb[1] = cmd_size;
2313 mcp->mb[2] = MSW(sns_phys_address);
2314 mcp->mb[3] = LSW(sns_phys_address);
2315 mcp->mb[6] = MSW(MSD(sns_phys_address));
2316 mcp->mb[7] = LSW(MSD(sns_phys_address));
2317 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2318 mcp->in_mb = MBX_0|MBX_1;
2319 mcp->buf_size = buf_size;
2320 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2321 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2322 rval = qla2x00_mailbox_command(vha, mcp);
2324 if (rval != QLA_SUCCESS) {
2325 /*EMPTY*/
2326 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2327 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2328 rval, mcp->mb[0], mcp->mb[1]);
2329 } else {
2330 /*EMPTY*/
2331 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2332 "Done %s.\n", __func__);
2335 return rval;
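/*
 * qla24xx_login_fabric
 *	Issue fabric port login for ISP24xx-and-later parts using a
 *	Login/Logout Port IOCB instead of a mailbox command.  Completion
 *	and IO-parameter values are translated into the classic mailbox
 *	status codes (MBS_*) returned through mb[], with class-of-service
 *	bits passed back in mb[10].
 *
 * Input:
 *	vha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain/area/al_pa = device port ID.
 *	mb = pointer for returned mailbox-style status.
 *	opt = options (BIT_0 = conditional PLOGI, BIT_1 = skip PRLI).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */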
2339 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2340 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2342 int rval;
2344 struct logio_entry_24xx *lg;
2345 dma_addr_t lg_dma;
2346 uint32_t iop[2];
2347 struct qla_hw_data *ha = vha->hw;
2348 struct req_que *req;
2350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2351 "Entered %s.\n", __func__);
2353 if (vha->vp_idx && vha->qpair)
2354 req = vha->qpair->req;
2355 else
2356 req = ha->req_q_map[0];
2358 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2359 if (lg == NULL) {
2360 ql_log(ql_log_warn, vha, 0x1062,
2361 "Failed to allocate login IOCB.\n");
2362 return QLA_MEMORY_ALLOC_FAILED;
2365 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2366 lg->entry_count = 1;
2367 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2368 lg->nport_handle = cpu_to_le16(loop_id);
2369 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2370 if (opt & BIT_0)
2371 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2372 if (opt & BIT_1)
2373 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2374 lg->port_id[0] = al_pa;
2375 lg->port_id[1] = area;
2376 lg->port_id[2] = domain;
2377 lg->vp_index = vha->vp_idx;
2378 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2379 (ha->r_a_tov / 10 * 2) + 2);
2380 if (rval != QLA_SUCCESS) {
2381 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2382 "Failed to issue login IOCB (%x).\n", rval);
2383 } else if (lg->entry_status != 0) {
2384 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2385 "Failed to complete IOCB -- error status (%x).\n",
2386 lg->entry_status);
2387 rval = QLA_FUNCTION_FAILED;
2388 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2389 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2390 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2392 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2393 "Failed to complete IOCB -- completion status (%x) "
2394 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2395 iop[0], iop[1]);
2397 switch (iop[0]) {
2398 case LSC_SCODE_PORTID_USED:
2399 mb[0] = MBS_PORT_ID_USED;
2400 mb[1] = LSW(iop[1]);
2401 break;
2402 case LSC_SCODE_NPORT_USED:
2403 mb[0] = MBS_LOOP_ID_USED;
2404 break;
2405 case LSC_SCODE_NOLINK:
2406 case LSC_SCODE_NOIOCB:
2407 case LSC_SCODE_NOXCB:
2408 case LSC_SCODE_CMD_FAILED:
2409 case LSC_SCODE_NOFABRIC:
2410 case LSC_SCODE_FW_NOT_READY:
2411 case LSC_SCODE_NOT_LOGGED_IN:
2412 case LSC_SCODE_NOPCB:
2413 case LSC_SCODE_ELS_REJECT:
2414 case LSC_SCODE_CMD_PARAM_ERR:
2415 case LSC_SCODE_NONPORT:
2416 case LSC_SCODE_LOGGED_IN:
2417 case LSC_SCODE_NOFLOGI_ACC:
2418 default:
2419 mb[0] = MBS_COMMAND_ERROR;
2420 break;
2422 } else {
2423 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2424 "Done %s.\n", __func__);
2426 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2428 mb[0] = MBS_COMMAND_COMPLETE;
2429 mb[1] = 0;
2430 if (iop[0] & BIT_4) {
2431 if (iop[0] & BIT_8)
2432 mb[1] |= BIT_1;
2433 } else
2434 mb[1] = BIT_0;
2436 /* Passback COS information. */
2437 mb[10] = 0;
2438 if (lg->io_parameter[7] || lg->io_parameter[8])
2439 mb[10] |= BIT_0; /* Class 2. */
2440 if (lg->io_parameter[9] || lg->io_parameter[10])
2441 mb[10] |= BIT_1; /* Class 3. */
2442 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2443 mb[10] |= BIT_7; /* Confirmed Completion
2444 * Allowed */
2448 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2450 return rval;
2454 * qla2x00_login_fabric
2455 * Issue login fabric port mailbox command.
2457 * Input:
2458 * ha = adapter block pointer.
2459 * loop_id = device loop ID.
2460 * domain = device domain.
2461 * area = device area.
2462 * al_pa = device AL_PA.
2463 * status = pointer for return status.
2464 * opt = command options.
2465 * TARGET_QUEUE_LOCK must be released.
2466 * ADAPTER_STATE_LOCK must be released.
2468 * Returns:
2469 * qla2x00 local function return status code.
2471 * Context:
2472 * Kernel context.
2475 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2476 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2478 int rval;
2479 mbx_cmd_t mc;
2480 mbx_cmd_t *mcp = &mc;
2481 struct qla_hw_data *ha = vha->hw;
2483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2484 "Entered %s.\n", __func__);
2486 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2487 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2488 if (HAS_EXTENDED_IDS(ha)) {
2489 mcp->mb[1] = loop_id;
2490 mcp->mb[10] = opt;
2491 mcp->out_mb |= MBX_10;
2492 } else {
2493 mcp->mb[1] = (loop_id << 8) | opt;
2495 mcp->mb[2] = domain;
2496 mcp->mb[3] = area << 8 | al_pa;
2498 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2499 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2500 mcp->flags = 0;
2501 rval = qla2x00_mailbox_command(vha, mcp);
2503 /* Return mailbox statuses. */
2504 if (mb != NULL) {
2505 mb[0] = mcp->mb[0];
2506 mb[1] = mcp->mb[1];
2507 mb[2] = mcp->mb[2];
2508 mb[6] = mcp->mb[6];
2509 mb[7] = mcp->mb[7];
2510 /* COS retrieved from Get-Port-Database mailbox command. */
2511 mb[10] = 0;
2514 if (rval != QLA_SUCCESS) {
2515 /* RLU tmp code: need to change the main mailbox_command function to
2516 * return ok even when the mailbox completion value is not
2517 * SUCCESS. The caller is responsible for interpreting the return
2518 * values of this mailbox command if we're not to change too much
2519 * of the existing code. */
2521 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2522 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2523 mcp->mb[0] == 0x4006)
2524 rval = QLA_SUCCESS;
2526 /*EMPTY*/
2527 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2528 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2529 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2530 } else {
2531 /*EMPTY*/
2532 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2533 "Done %s.\n", __func__);
2536 return rval;
2540 * qla2x00_login_local_device
2541 * Issue login loop port mailbox command.
2543 * Input:
2544 * ha = adapter block pointer.
2545 * loop_id = device loop ID.
2546 * opt = command options.
2548 * Returns:
2549 * Return status code.
2551 * Context:
2552 * Kernel context.
2556 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2557 uint16_t *mb_ret, uint8_t opt)
2559 int rval;
2560 mbx_cmd_t mc;
2561 mbx_cmd_t *mcp = &mc;
2562 struct qla_hw_data *ha = vha->hw;
2564 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2565 "Entered %s.\n", __func__);
2567 if (IS_FWI2_CAPABLE(ha))
2568 return qla24xx_login_fabric(vha, fcport->loop_id,
2569 fcport->d_id.b.domain, fcport->d_id.b.area,
2570 fcport->d_id.b.al_pa, mb_ret, opt);
2572 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2573 if (HAS_EXTENDED_IDS(ha))
2574 mcp->mb[1] = fcport->loop_id;
2575 else
2576 mcp->mb[1] = fcport->loop_id << 8;
2577 mcp->mb[2] = opt;
2578 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2579 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2580 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2581 mcp->flags = 0;
2582 rval = qla2x00_mailbox_command(vha, mcp);
2584 /* Return mailbox statuses. */
2585 if (mb_ret != NULL) {
2586 mb_ret[0] = mcp->mb[0];
2587 mb_ret[1] = mcp->mb[1];
2588 mb_ret[6] = mcp->mb[6];
2589 mb_ret[7] = mcp->mb[7];
2592 if (rval != QLA_SUCCESS) {
2593 /* AV tmp code: need to change the main mailbox_command function to
2594 * return ok even when the mailbox completion value is not
2595 * SUCCESS. The caller is responsible for interpreting the return
2596 * values of this mailbox command if we're not to change too much
2597 * of the existing code. */
2599 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2600 rval = QLA_SUCCESS;
2602 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2603 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2604 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2605 } else {
2606 /*EMPTY*/
2607 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2608 "Done %s.\n", __func__);
2611 return (rval);
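/*
 * qla24xx_fabric_logout
 *	Issue an implicit fabric port logout for ISP24xx-and-later parts
 *	using a Login/Logout Port IOCB (LCF_COMMAND_LOGO | LCF_IMPL_LOGO |
 *	LCF_FREE_NPORT).
 *
 * Input:
 *	vha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain/area/al_pa = device port ID.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */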
2615 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2616 uint8_t area, uint8_t al_pa)
2618 int rval;
2619 struct logio_entry_24xx *lg;
2620 dma_addr_t lg_dma;
2621 struct qla_hw_data *ha = vha->hw;
2622 struct req_que *req;
2624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2625 "Entered %s.\n", __func__);
2627 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2628 if (lg == NULL) {
2629 ql_log(ql_log_warn, vha, 0x106e,
2630 "Failed to allocate logout IOCB.\n");
2631 return QLA_MEMORY_ALLOC_FAILED;
2634 req = vha->req;
2635 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2636 lg->entry_count = 1;
2637 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2638 lg->nport_handle = cpu_to_le16(loop_id);
2639 lg->control_flags =
2640 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2641 LCF_FREE_NPORT);
2642 lg->port_id[0] = al_pa;
2643 lg->port_id[1] = area;
2644 lg->port_id[2] = domain;
2645 lg->vp_index = vha->vp_idx;
2646 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2647 (ha->r_a_tov / 10 * 2) + 2);
2648 if (rval != QLA_SUCCESS) {
2649 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2650 "Failed to issue logout IOCB (%x).\n", rval);
2651 } else if (lg->entry_status != 0) {
2652 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2653 "Failed to complete IOCB -- error status (%x).\n",
2654 lg->entry_status);
2655 rval = QLA_FUNCTION_FAILED;
2656 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2657 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2658 "Failed to complete IOCB -- completion status (%x) "
2659 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2660 le32_to_cpu(lg->io_parameter[0]),
2661 le32_to_cpu(lg->io_parameter[1]));
2662 } else {
2663 /*EMPTY*/
2664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2665 "Done %s.\n", __func__);
2668 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2670 return rval;
2674 * qla2x00_fabric_logout
2675 * Issue logout fabric port mailbox command.
2677 * Input:
2678 * ha = adapter block pointer.
2679 * loop_id = device loop ID.
2680 * TARGET_QUEUE_LOCK must be released.
2681 * ADAPTER_STATE_LOCK must be released.
2683 * Returns:
2684 * qla2x00 local function return status code.
2686 * Context:
2687 * Kernel context.
2690 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2691 uint8_t area, uint8_t al_pa)
2693 int rval;
2694 mbx_cmd_t mc;
2695 mbx_cmd_t *mcp = &mc;
2697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2698 "Entered %s.\n", __func__);
2700 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2701 mcp->out_mb = MBX_1|MBX_0;
2702 if (HAS_EXTENDED_IDS(vha->hw)) {
2703 mcp->mb[1] = loop_id;
2704 mcp->mb[10] = 0;
2705 mcp->out_mb |= MBX_10;
2706 } else {
2707 mcp->mb[1] = loop_id << 8;
2710 mcp->in_mb = MBX_1|MBX_0;
2711 mcp->tov = MBX_TOV_SECONDS;
2712 mcp->flags = 0;
2713 rval = qla2x00_mailbox_command(vha, mcp);
2715 if (rval != QLA_SUCCESS) {
2716 /*EMPTY*/
2717 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2718 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2719 } else {
2720 /*EMPTY*/
2721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2722 "Done %s.\n", __func__);
2725 return rval;
2729 * qla2x00_full_login_lip
2730 * Issue full login LIP mailbox command.
2732 * Input:
2733 * ha = adapter block pointer.
2734 * TARGET_QUEUE_LOCK must be released.
2735 * ADAPTER_STATE_LOCK must be released.
2737 * Returns:
2738 * qla2x00 local function return status code.
2740 * Context:
2741 * Kernel context.
2744 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2746 int rval;
2747 mbx_cmd_t mc;
2748 mbx_cmd_t *mcp = &mc;
2750 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2751 "Entered %s.\n", __func__);
2753 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2754 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_4 : 0;
2755 mcp->mb[2] = 0;
2756 mcp->mb[3] = 0;
2757 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2758 mcp->in_mb = MBX_0;
2759 mcp->tov = MBX_TOV_SECONDS;
2760 mcp->flags = 0;
2761 rval = qla2x00_mailbox_command(vha, mcp);
2763 if (rval != QLA_SUCCESS) {
2764 /*EMPTY*/
2765 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2766 } else {
2767 /*EMPTY*/
2768 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2769 "Done %s.\n", __func__);
2772 return rval;
2776 * qla2x00_get_id_list
2778 * Input:
2779 * ha = adapter block pointer.
2781 * Returns:
2782 * qla2x00 local function return status code.
2784 * Context:
2785 * Kernel context.
2788 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2789 uint16_t *entries)
2791 int rval;
2792 mbx_cmd_t mc;
2793 mbx_cmd_t *mcp = &mc;
2795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2796 "Entered %s.\n", __func__);
2798 if (id_list == NULL)
2799 return QLA_FUNCTION_FAILED;
2801 mcp->mb[0] = MBC_GET_ID_LIST;
2802 mcp->out_mb = MBX_0;
2803 if (IS_FWI2_CAPABLE(vha->hw)) {
2804 mcp->mb[2] = MSW(id_list_dma);
2805 mcp->mb[3] = LSW(id_list_dma);
2806 mcp->mb[6] = MSW(MSD(id_list_dma));
2807 mcp->mb[7] = LSW(MSD(id_list_dma));
2808 mcp->mb[8] = 0;
2809 mcp->mb[9] = vha->vp_idx;
2810 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2811 } else {
2812 mcp->mb[1] = MSW(id_list_dma);
2813 mcp->mb[2] = LSW(id_list_dma);
2814 mcp->mb[3] = MSW(MSD(id_list_dma));
2815 mcp->mb[6] = LSW(MSD(id_list_dma));
2816 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2818 mcp->in_mb = MBX_1|MBX_0;
2819 mcp->tov = MBX_TOV_SECONDS;
2820 mcp->flags = 0;
2821 rval = qla2x00_mailbox_command(vha, mcp);
2823 if (rval != QLA_SUCCESS) {
2824 /*EMPTY*/
2825 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2826 } else {
2827 *entries = mcp->mb[1];
2828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2829 "Done %s.\n", __func__);
2832 return rval;
2836 * qla2x00_get_resource_cnts
2837 * Get current firmware resource counts.
2839 * Input:
2840 * ha = adapter block pointer.
2842 * Returns:
2843 * qla2x00 local function return status code.
2845 * Context:
2846 * Kernel context.
2849 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2851 struct qla_hw_data *ha = vha->hw;
2852 int rval;
2853 mbx_cmd_t mc;
2854 mbx_cmd_t *mcp = &mc;
2856 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2857 "Entered %s.\n", __func__);
2859 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2860 mcp->out_mb = MBX_0;
2861 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2862 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2863 mcp->in_mb |= MBX_12;
2864 mcp->tov = MBX_TOV_SECONDS;
2865 mcp->flags = 0;
2866 rval = qla2x00_mailbox_command(vha, mcp);
2868 if (rval != QLA_SUCCESS) {
2869 /*EMPTY*/
2870 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2871 "Failed mb[0]=%x.\n", mcp->mb[0]);
2872 } else {
2873 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2874 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2875 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2876 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2877 mcp->mb[11], mcp->mb[12]);
2879 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2880 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2881 ha->cur_fw_xcb_count = mcp->mb[3];
2882 ha->orig_fw_xcb_count = mcp->mb[6];
2883 ha->cur_fw_iocb_count = mcp->mb[7];
2884 ha->orig_fw_iocb_count = mcp->mb[10];
2885 if (ha->flags.npiv_supported)
2886 ha->max_npiv_vports = mcp->mb[11];
2887 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2888 ha->fw_max_fcf_count = mcp->mb[12];
2891 return (rval);
2895 * qla2x00_get_fcal_position_map
2896 * Get FCAL (LILP) position map using mailbox command
2898 * Input:
2899 * ha = adapter state pointer.
2900 * pos_map = buffer pointer (can be NULL).
2902 * Returns:
2903 * qla2x00 local function return status code.
2905 * Context:
2906 * Kernel context.
2909 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2911 int rval;
2912 mbx_cmd_t mc;
2913 mbx_cmd_t *mcp = &mc;
2914 char *pmap;
2915 dma_addr_t pmap_dma;
2916 struct qla_hw_data *ha = vha->hw;
2918 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2919 "Entered %s.\n", __func__);
2921 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2922 if (pmap == NULL) {
2923 ql_log(ql_log_warn, vha, 0x1080,
2924 "Memory alloc failed.\n");
2925 return QLA_MEMORY_ALLOC_FAILED;
2928 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2929 mcp->mb[2] = MSW(pmap_dma);
2930 mcp->mb[3] = LSW(pmap_dma);
2931 mcp->mb[6] = MSW(MSD(pmap_dma));
2932 mcp->mb[7] = LSW(MSD(pmap_dma));
2933 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2934 mcp->in_mb = MBX_1|MBX_0;
2935 mcp->buf_size = FCAL_MAP_SIZE;
2936 mcp->flags = MBX_DMA_IN;
2937 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2938 rval = qla2x00_mailbox_command(vha, mcp);
2940 if (rval == QLA_SUCCESS) {
2941 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2942 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2943 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2944 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2945 pmap, pmap[0] + 1);
2947 if (pos_map)
2948 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2950 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2952 if (rval != QLA_SUCCESS) {
2953 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2954 } else {
2955 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2956 "Done %s.\n", __func__);
2959 return rval;
2963 * qla2x00_get_link_status
2965 * Input:
2966 * ha = adapter block pointer.
2967 * loop_id = device loop ID.
2968 * ret_buf = pointer to link status return buffer.
2970 * Returns:
2971 * 0 = success.
2972 * BIT_0 = mem alloc error.
2973 * BIT_1 = mailbox error.
2976 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2977 struct link_statistics *stats, dma_addr_t stats_dma)
2979 int rval;
2980 mbx_cmd_t mc;
2981 mbx_cmd_t *mcp = &mc;
2982 uint32_t *iter = (void *)stats;
2983 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2984 struct qla_hw_data *ha = vha->hw;
2986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2987 "Entered %s.\n", __func__);
2989 mcp->mb[0] = MBC_GET_LINK_STATUS;
2990 mcp->mb[2] = MSW(LSD(stats_dma));
2991 mcp->mb[3] = LSW(LSD(stats_dma));
2992 mcp->mb[6] = MSW(MSD(stats_dma));
2993 mcp->mb[7] = LSW(MSD(stats_dma));
2994 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2995 mcp->in_mb = MBX_0;
2996 if (IS_FWI2_CAPABLE(ha)) {
2997 mcp->mb[1] = loop_id;
2998 mcp->mb[4] = 0;
2999 mcp->mb[10] = 0;
3000 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
3001 mcp->in_mb |= MBX_1;
3002 } else if (HAS_EXTENDED_IDS(ha)) {
3003 mcp->mb[1] = loop_id;
3004 mcp->mb[10] = 0;
3005 mcp->out_mb |= MBX_10|MBX_1;
3006 } else {
3007 mcp->mb[1] = loop_id << 8;
3008 mcp->out_mb |= MBX_1;
3010 mcp->tov = MBX_TOV_SECONDS;
3011 mcp->flags = IOCTL_CMD;
3012 rval = qla2x00_mailbox_command(vha, mcp);
3014 if (rval == QLA_SUCCESS) {
3015 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3016 ql_dbg(ql_dbg_mbx, vha, 0x1085,
3017 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3018 rval = QLA_FUNCTION_FAILED;
3019 } else {
3020 /* Re-endianize - firmware data is le32. */
3021 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
3022 "Done %s.\n", __func__);
3023 for ( ; dwords--; iter++)
3024 le32_to_cpus(iter);
3026 } else {
3027 /* Failed. */
3028 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
3031 return rval;
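/*
 * qla24xx_get_isp_stats
 *	Retrieve ISP24xx link/private statistics (MBC_GET_LINK_PRIV_STATS)
 *	into the supplied DMA buffer via the IOCB-based mailbox path
 *	(qla24xx_send_mb_cmd), then convert the returned words from
 *	little-endian to host order.
 *
 * Input:
 *	vha = adapter block pointer.
 *	stats/stats_dma = statistics buffer and its DMA handle.
 *	options = statistics options passed in mailbox 10.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */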
3035 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
3036 dma_addr_t stats_dma, uint16_t options)
3038 int rval;
3039 mbx_cmd_t mc;
3040 mbx_cmd_t *mcp = &mc;
3041 uint32_t *iter, dwords;
3043 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
3044 "Entered %s.\n", __func__);
3046 memset(&mc, 0, sizeof(mc));
3047 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
3048 mc.mb[2] = MSW(stats_dma);
3049 mc.mb[3] = LSW(stats_dma);
3050 mc.mb[6] = MSW(MSD(stats_dma));
3051 mc.mb[7] = LSW(MSD(stats_dma));
3052 mc.mb[8] = sizeof(struct link_statistics) / 4;
3053 mc.mb[9] = cpu_to_le16(vha->vp_idx);
3054 mc.mb[10] = cpu_to_le16(options);
3056 rval = qla24xx_send_mb_cmd(vha, &mc);
3058 if (rval == QLA_SUCCESS) {
3059 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3060 ql_dbg(ql_dbg_mbx, vha, 0x1089,
3061 "Failed mb[0]=%x.\n", mcp->mb[0]);
3062 rval = QLA_FUNCTION_FAILED;
3063 } else {
3064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
3065 "Done %s.\n", __func__);
3066 /* Re-endianize - firmware data is le32. */
3067 dwords = sizeof(struct link_statistics) / 4;
3068 iter = &stats->link_fail_cnt;
3069 for ( ; dwords--; iter++)
3070 le32_to_cpus(iter);
3072 } else {
3073 /* Failed. */
3074 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
3077 return rval;
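/*
 * qla24xx_abort_command
 *	Abort a previously issued command by locating its outstanding
 *	handle on the request queue (under the qpair lock) and issuing an
 *	Abort IOCB for it.  When ql2xasynctmfenable is set the asynchronous
 *	abort path is used instead.
 *
 * Input:
 *	sp = srb pointer of the command to abort.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */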
3081 qla24xx_abort_command(srb_t *sp)
3083 int rval;
3084 unsigned long flags = 0;
3086 struct abort_entry_24xx *abt;
3087 dma_addr_t abt_dma;
3088 uint32_t handle;
3089 fc_port_t *fcport = sp->fcport;
3090 struct scsi_qla_host *vha = fcport->vha;
3091 struct qla_hw_data *ha = vha->hw;
3092 struct req_que *req = vha->req;
3093 struct qla_qpair *qpair = sp->qpair;
3095 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3096 "Entered %s.\n", __func__);
3098 if (vha->flags.qpairs_available && sp->qpair)
3099 req = sp->qpair->req;
3100 else
3101 return QLA_FUNCTION_FAILED;
3103 if (ql2xasynctmfenable)
3104 return qla24xx_async_abort_command(sp);
3106 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
3107 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3108 if (req->outstanding_cmds[handle] == sp)
3109 break;
3111 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
3112 if (handle == req->num_outstanding_cmds) {
3113 /* Command not found. */
3114 return QLA_FUNCTION_FAILED;
3117 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3118 if (abt == NULL) {
3119 ql_log(ql_log_warn, vha, 0x108d,
3120 "Failed to allocate abort IOCB.\n");
3121 return QLA_MEMORY_ALLOC_FAILED;
3124 abt->entry_type = ABORT_IOCB_TYPE;
3125 abt->entry_count = 1;
3126 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3127 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3128 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3129 abt->port_id[0] = fcport->d_id.b.al_pa;
3130 abt->port_id[1] = fcport->d_id.b.area;
3131 abt->port_id[2] = fcport->d_id.b.domain;
3132 abt->vp_index = fcport->vha->vp_idx;
3134 abt->req_que_no = cpu_to_le16(req->id);
3136 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3137 if (rval != QLA_SUCCESS) {
3138 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3139 "Failed to issue IOCB (%x).\n", rval);
3140 } else if (abt->entry_status != 0) {
3141 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3142 "Failed to complete IOCB -- error status (%x).\n",
3143 abt->entry_status);
3144 rval = QLA_FUNCTION_FAILED;
3145 } else if (abt->nport_handle != cpu_to_le16(0)) {
3146 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3147 "Failed to complete IOCB -- completion status (%x).\n",
3148 le16_to_cpu(abt->nport_handle));
3149 if (abt->nport_handle == CS_IOCB_ERROR)
3150 rval = QLA_FUNCTION_PARAMETER_ERROR;
3151 else
3152 rval = QLA_FUNCTION_FAILED;
3153 } else {
3154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3155 "Done %s.\n", __func__);
3158 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3160 return rval;
3163 struct tsk_mgmt_cmd {
3164 union {
3165 struct tsk_mgmt_entry tsk;
3166 struct sts_entry_24xx sts;
3167 } p;
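/*
 * __qla24xx_issue_tmf
 *	Common helper for ISP24xx task management: build and issue a Task
 *	Management IOCB of the given type (e.g. TCF_TARGET_RESET or
 *	TCF_LUN_RESET), validate the entry/completion/SCSI status returned
 *	in the same buffer, and follow up with a marker IOCB.
 *
 * Input:
 *	name = printable name used in log messages.
 *	type = task management control flags.
 *	fcport = FC port to act on.
 *	l = LUN (used only for LUN reset).
 *
 * Returns:
 *	qla2x00 local function return status code.
 */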
3170 static int
3171 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3172 uint64_t l, int tag)
3174 int rval, rval2;
3175 struct tsk_mgmt_cmd *tsk;
3176 struct sts_entry_24xx *sts;
3177 dma_addr_t tsk_dma;
3178 scsi_qla_host_t *vha;
3179 struct qla_hw_data *ha;
3180 struct req_que *req;
3181 struct qla_qpair *qpair;
3183 vha = fcport->vha;
3184 ha = vha->hw;
3185 req = vha->req;
3187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3188 "Entered %s.\n", __func__);
3190 if (vha->vp_idx && vha->qpair) {
3191 /* NPIV port */
3192 qpair = vha->qpair;
3193 req = qpair->req;
3196 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3197 if (tsk == NULL) {
3198 ql_log(ql_log_warn, vha, 0x1093,
3199 "Failed to allocate task management IOCB.\n");
3200 return QLA_MEMORY_ALLOC_FAILED;
3203 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3204 tsk->p.tsk.entry_count = 1;
3205 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3206 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3207 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3208 tsk->p.tsk.control_flags = cpu_to_le32(type);
3209 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3210 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3211 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3212 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3213 if (type == TCF_LUN_RESET) {
3214 int_to_scsilun(l, &tsk->p.tsk.lun);
3215 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3216 sizeof(tsk->p.tsk.lun));
3219 sts = &tsk->p.sts;
3220 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3221 if (rval != QLA_SUCCESS) {
3222 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3223 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3224 } else if (sts->entry_status != 0) {
3225 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3226 "Failed to complete IOCB -- error status (%x).\n",
3227 sts->entry_status);
3228 rval = QLA_FUNCTION_FAILED;
3229 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3230 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3231 "Failed to complete IOCB -- completion status (%x).\n",
3232 le16_to_cpu(sts->comp_status));
3233 rval = QLA_FUNCTION_FAILED;
3234 } else if (le16_to_cpu(sts->scsi_status) &
3235 SS_RESPONSE_INFO_LEN_VALID) {
3236 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3237 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3238 "Ignoring inconsistent data length -- not enough "
3239 "response info (%d).\n",
3240 le32_to_cpu(sts->rsp_data_len));
3241 } else if (sts->data[3]) {
3242 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3243 "Failed to complete IOCB -- response (%x).\n",
3244 sts->data[3]);
3245 rval = QLA_FUNCTION_FAILED;
3249 /* Issue marker IOCB. */
3250 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3251 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3252 if (rval2 != QLA_SUCCESS) {
3253 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3254 "Failed to issue marker IOCB (%x).\n", rval2);
3255 } else {
3256 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3257 "Done %s.\n", __func__);
3260 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3262 return rval;
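/*
 * qla24xx_abort_target
 *	Issue a target reset for the given FC port.  Uses the asynchronous
 *	TM path when ql2xasynctmfenable is set on an FWI2-capable adapter,
 *	otherwise goes through __qla24xx_issue_tmf().
 */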
3266 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3268 struct qla_hw_data *ha = fcport->vha->hw;
3270 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3271 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3273 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3277 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3279 struct qla_hw_data *ha = fcport->vha->hw;
3281 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3282 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3284 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
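/*
 * qla2x00_system_error
 *	Ask the firmware to generate a system error via
 *	MBC_GEN_SYSTEM_ERROR.  Only supported on ISP23xx and FWI2-capable
 *	adapters.
 */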
3288 qla2x00_system_error(scsi_qla_host_t *vha)
3290 int rval;
3291 mbx_cmd_t mc;
3292 mbx_cmd_t *mcp = &mc;
3293 struct qla_hw_data *ha = vha->hw;
3295 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3296 return QLA_FUNCTION_FAILED;
3298 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3299 "Entered %s.\n", __func__);
3301 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3302 mcp->out_mb = MBX_0;
3303 mcp->in_mb = MBX_0;
3304 mcp->tov = 5;
3305 mcp->flags = 0;
3306 rval = qla2x00_mailbox_command(vha, mcp);
3308 if (rval != QLA_SUCCESS) {
3309 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3310 } else {
3311 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3312 "Done %s.\n", __func__);
3315 return rval;
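/*
 * qla2x00_write_serdes_word
 *	Write a 16-bit value to the SerDes register at 'addr' via
 *	MBC_WRITE_SERDES.  Supported on ISP25xx, ISP2031 and ISP27xx;
 *	on ISP2031 only the low byte of the data word is written.
 */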
3319 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3321 int rval;
3322 mbx_cmd_t mc;
3323 mbx_cmd_t *mcp = &mc;
3325 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3326 !IS_QLA27XX(vha->hw))
3327 return QLA_FUNCTION_FAILED;
3329 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3330 "Entered %s.\n", __func__);
3332 mcp->mb[0] = MBC_WRITE_SERDES;
3333 mcp->mb[1] = addr;
3334 if (IS_QLA2031(vha->hw))
3335 mcp->mb[2] = data & 0xff;
3336 else
3337 mcp->mb[2] = data;
3339 mcp->mb[3] = 0;
3340 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3341 mcp->in_mb = MBX_0;
3342 mcp->tov = MBX_TOV_SECONDS;
3343 mcp->flags = 0;
3344 rval = qla2x00_mailbox_command(vha, mcp);
3346 if (rval != QLA_SUCCESS) {
3347 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3348 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3349 } else {
3350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3351 "Done %s.\n", __func__);
3354 return rval;
3358 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3360 int rval;
3361 mbx_cmd_t mc;
3362 mbx_cmd_t *mcp = &mc;
3364 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3365 !IS_QLA27XX(vha->hw))
3366 return QLA_FUNCTION_FAILED;
3368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3369 "Entered %s.\n", __func__);
3371 mcp->mb[0] = MBC_READ_SERDES;
3372 mcp->mb[1] = addr;
3373 mcp->mb[3] = 0;
3374 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3375 mcp->in_mb = MBX_1|MBX_0;
3376 mcp->tov = MBX_TOV_SECONDS;
3377 mcp->flags = 0;
3378 rval = qla2x00_mailbox_command(vha, mcp);
3380 if (IS_QLA2031(vha->hw))
3381 *data = mcp->mb[1] & 0xff;
3382 else
3383 *data = mcp->mb[1];
3385 if (rval != QLA_SUCCESS) {
3386 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3387 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3388 } else {
3389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3390 "Done %s.\n", __func__);
3393 return rval;
3397 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3399 int rval;
3400 mbx_cmd_t mc;
3401 mbx_cmd_t *mcp = &mc;
3403 if (!IS_QLA8044(vha->hw))
3404 return QLA_FUNCTION_FAILED;
3406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3407 "Entered %s.\n", __func__);
3409 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3410 mcp->mb[1] = HCS_WRITE_SERDES;
3411 mcp->mb[3] = LSW(addr);
3412 mcp->mb[4] = MSW(addr);
3413 mcp->mb[5] = LSW(data);
3414 mcp->mb[6] = MSW(data);
3415 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3416 mcp->in_mb = MBX_0;
3417 mcp->tov = MBX_TOV_SECONDS;
3418 mcp->flags = 0;
3419 rval = qla2x00_mailbox_command(vha, mcp);
3421 if (rval != QLA_SUCCESS) {
3422 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3423 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3424 } else {
3425 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3426 "Done %s.\n", __func__);
3429 return rval;
3433 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3435 int rval;
3436 mbx_cmd_t mc;
3437 mbx_cmd_t *mcp = &mc;
3439 if (!IS_QLA8044(vha->hw))
3440 return QLA_FUNCTION_FAILED;
3442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3443 "Entered %s.\n", __func__);
3445 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3446 mcp->mb[1] = HCS_READ_SERDES;
3447 mcp->mb[3] = LSW(addr);
3448 mcp->mb[4] = MSW(addr);
3449 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3450 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3451 mcp->tov = MBX_TOV_SECONDS;
3452 mcp->flags = 0;
3453 rval = qla2x00_mailbox_command(vha, mcp);
3455 *data = mcp->mb[2] << 16 | mcp->mb[1];
3457 if (rval != QLA_SUCCESS) {
3458 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3459 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3460 } else {
3461 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3462 "Done %s.\n", __func__);
3465 return rval;
3469 * qla2x00_set_serdes_params() - Set SerDes transmitter parameters.
3470 * @vha: HA context
3471 * @sw_em_1g: serial link options
3472 * @sw_em_2g: serial link options
3473 * @sw_em_4g: serial link options
3475 * Returns qla2x00 local function return status code.
3478 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3479 uint16_t sw_em_2g, uint16_t sw_em_4g)
3481 int rval;
3482 mbx_cmd_t mc;
3483 mbx_cmd_t *mcp = &mc;
3485 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3486 "Entered %s.\n", __func__);
3488 mcp->mb[0] = MBC_SERDES_PARAMS;
3489 mcp->mb[1] = BIT_0;
3490 mcp->mb[2] = sw_em_1g | BIT_15;
3491 mcp->mb[3] = sw_em_2g | BIT_15;
3492 mcp->mb[4] = sw_em_4g | BIT_15;
3493 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3494 mcp->in_mb = MBX_0;
3495 mcp->tov = MBX_TOV_SECONDS;
3496 mcp->flags = 0;
3497 rval = qla2x00_mailbox_command(vha, mcp);
3499 if (rval != QLA_SUCCESS) {
3500 /*EMPTY*/
3501 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3502 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3503 } else {
3504 /*EMPTY*/
3505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3506 "Done %s.\n", __func__);
3509 return rval;
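/*
 * qla2x00_stop_firmware
 *	Issue MBC_STOP_FIRMWARE to halt the RISC firmware on FWI2-capable
 *	adapters.  A firmware response of MBS_INVALID_COMMAND is mapped to
 *	QLA_INVALID_COMMAND.
 */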
3513 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3515 int rval;
3516 mbx_cmd_t mc;
3517 mbx_cmd_t *mcp = &mc;
3519 if (!IS_FWI2_CAPABLE(vha->hw))
3520 return QLA_FUNCTION_FAILED;
3522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3523 "Entered %s.\n", __func__);
3525 mcp->mb[0] = MBC_STOP_FIRMWARE;
3526 mcp->mb[1] = 0;
3527 mcp->out_mb = MBX_1|MBX_0;
3528 mcp->in_mb = MBX_0;
3529 mcp->tov = 5;
3530 mcp->flags = 0;
3531 rval = qla2x00_mailbox_command(vha, mcp);
3533 if (rval != QLA_SUCCESS) {
3534 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3535 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3536 rval = QLA_INVALID_COMMAND;
3537 } else {
3538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3539 "Done %s.\n", __func__);
3542 return rval;
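/*
 * qla2x00_enable_eft_trace
 *	Enable Extended Firmware Trace (EFT) via MBC_TRACE_CONTROL,
 *	pointing the firmware at the supplied DMA buffer; trace AENs are
 *	left disabled (TC_AEN_DISABLE).
 *
 * Input:
 *	vha = adapter block pointer.
 *	eft_dma = DMA address of the trace buffer.
 *	buffers = number of trace buffers.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */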
3546 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3547 uint16_t buffers)
3549 int rval;
3550 mbx_cmd_t mc;
3551 mbx_cmd_t *mcp = &mc;
3553 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3554 "Entered %s.\n", __func__);
3556 if (!IS_FWI2_CAPABLE(vha->hw))
3557 return QLA_FUNCTION_FAILED;
3559 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3560 return QLA_FUNCTION_FAILED;
3562 mcp->mb[0] = MBC_TRACE_CONTROL;
3563 mcp->mb[1] = TC_EFT_ENABLE;
3564 mcp->mb[2] = LSW(eft_dma);
3565 mcp->mb[3] = MSW(eft_dma);
3566 mcp->mb[4] = LSW(MSD(eft_dma));
3567 mcp->mb[5] = MSW(MSD(eft_dma));
3568 mcp->mb[6] = buffers;
3569 mcp->mb[7] = TC_AEN_DISABLE;
3570 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3571 mcp->in_mb = MBX_1|MBX_0;
3572 mcp->tov = MBX_TOV_SECONDS;
3573 mcp->flags = 0;
3574 rval = qla2x00_mailbox_command(vha, mcp);
3575 if (rval != QLA_SUCCESS) {
3576 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3577 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3578 rval, mcp->mb[0], mcp->mb[1]);
3579 } else {
3580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3581 "Done %s.\n", __func__);
3584 return rval;
3588 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3590 int rval;
3591 mbx_cmd_t mc;
3592 mbx_cmd_t *mcp = &mc;
3594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3595 "Entered %s.\n", __func__);
3597 if (!IS_FWI2_CAPABLE(vha->hw))
3598 return QLA_FUNCTION_FAILED;
3600 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3601 return QLA_FUNCTION_FAILED;
3603 mcp->mb[0] = MBC_TRACE_CONTROL;
3604 mcp->mb[1] = TC_EFT_DISABLE;
3605 mcp->out_mb = MBX_1|MBX_0;
3606 mcp->in_mb = MBX_1|MBX_0;
3607 mcp->tov = MBX_TOV_SECONDS;
3608 mcp->flags = 0;
3609 rval = qla2x00_mailbox_command(vha, mcp);
3610 if (rval != QLA_SUCCESS) {
3611 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3612 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3613 rval, mcp->mb[0], mcp->mb[1]);
3614 } else {
3615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3616 "Done %s.\n", __func__);
3619 return rval;
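/*
 * qla2x00_enable_fce_trace
 *	Enable Fibre Channel Event (FCE) tracing via MBC_TRACE_CONTROL
 *	using the default RX/TX trace sizes.  On success the returned
 *	mailbox registers are copied to 'mb' and the buffer count to
 *	'dwords'.
 *
 * Returns:
 *	qla2x00 local function return status code.
 */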
3623 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3624 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3626 int rval;
3627 mbx_cmd_t mc;
3628 mbx_cmd_t *mcp = &mc;
3630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3631 "Entered %s.\n", __func__);
3633 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3634 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3635 return QLA_FUNCTION_FAILED;
3637 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3638 return QLA_FUNCTION_FAILED;
3640 mcp->mb[0] = MBC_TRACE_CONTROL;
3641 mcp->mb[1] = TC_FCE_ENABLE;
3642 mcp->mb[2] = LSW(fce_dma);
3643 mcp->mb[3] = MSW(fce_dma);
3644 mcp->mb[4] = LSW(MSD(fce_dma));
3645 mcp->mb[5] = MSW(MSD(fce_dma));
3646 mcp->mb[6] = buffers;
3647 mcp->mb[7] = TC_AEN_DISABLE;
3648 mcp->mb[8] = 0;
3649 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3650 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3651 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3652 MBX_1|MBX_0;
3653 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3654 mcp->tov = MBX_TOV_SECONDS;
3655 mcp->flags = 0;
3656 rval = qla2x00_mailbox_command(vha, mcp);
3657 if (rval != QLA_SUCCESS) {
3658 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3659 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3660 rval, mcp->mb[0], mcp->mb[1]);
3661 } else {
3662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3663 "Done %s.\n", __func__);
3665 if (mb)
3666 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3667 if (dwords)
3668 *dwords = buffers;
3671 return rval;
3675 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3677 int rval;
3678 mbx_cmd_t mc;
3679 mbx_cmd_t *mcp = &mc;
3681 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3682 "Entered %s.\n", __func__);
3684 if (!IS_FWI2_CAPABLE(vha->hw))
3685 return QLA_FUNCTION_FAILED;
3687 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3688 return QLA_FUNCTION_FAILED;
3690 mcp->mb[0] = MBC_TRACE_CONTROL;
3691 mcp->mb[1] = TC_FCE_DISABLE;
3692 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3693 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3694 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3695 MBX_1|MBX_0;
3696 mcp->tov = MBX_TOV_SECONDS;
3697 mcp->flags = 0;
3698 rval = qla2x00_mailbox_command(vha, mcp);
3699 if (rval != QLA_SUCCESS) {
3700 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3701 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3702 rval, mcp->mb[0], mcp->mb[1]);
3703 } else {
3704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3705 "Done %s.\n", __func__);
3707 if (wr)
3708 *wr = (uint64_t) mcp->mb[5] << 48 |
3709 (uint64_t) mcp->mb[4] << 32 |
3710 (uint64_t) mcp->mb[3] << 16 |
3711 (uint64_t) mcp->mb[2];
3712 if (rd)
3713 *rd = (uint64_t) mcp->mb[9] << 48 |
3714 (uint64_t) mcp->mb[8] << 32 |
3715 (uint64_t) mcp->mb[7] << 16 |
3716 (uint64_t) mcp->mb[6];
3719 return rval;
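/*
 * qla2x00_get_idma_speed
 *	Read the iIDMA port speed of a logged-in port via MBC_PORT_PARAMS.
 *	The speed is returned in mailbox 3; the raw mailbox values are
 *	passed back through 'mb' when supplied.
 */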
3723 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3724 uint16_t *port_speed, uint16_t *mb)
3726 int rval;
3727 mbx_cmd_t mc;
3728 mbx_cmd_t *mcp = &mc;
3730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3731 "Entered %s.\n", __func__);
3733 if (!IS_IIDMA_CAPABLE(vha->hw))
3734 return QLA_FUNCTION_FAILED;
3736 mcp->mb[0] = MBC_PORT_PARAMS;
3737 mcp->mb[1] = loop_id;
3738 mcp->mb[2] = mcp->mb[3] = 0;
3739 mcp->mb[9] = vha->vp_idx;
3740 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3741 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3742 mcp->tov = MBX_TOV_SECONDS;
3743 mcp->flags = 0;
3744 rval = qla2x00_mailbox_command(vha, mcp);
3746 /* Return mailbox statuses. */
3747 if (mb != NULL) {
3748 mb[0] = mcp->mb[0];
3749 mb[1] = mcp->mb[1];
3750 mb[3] = mcp->mb[3];
3753 if (rval != QLA_SUCCESS) {
3754 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3755 } else {
3756 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3757 "Done %s.\n", __func__);
3758 if (port_speed)
3759 *port_speed = mcp->mb[3];
3762 return rval;
3766 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3767 uint16_t port_speed, uint16_t *mb)
3769 int rval;
3770 mbx_cmd_t mc;
3771 mbx_cmd_t *mcp = &mc;
3773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3774 "Entered %s.\n", __func__);
3776 if (!IS_IIDMA_CAPABLE(vha->hw))
3777 return QLA_FUNCTION_FAILED;
3779 mcp->mb[0] = MBC_PORT_PARAMS;
3780 mcp->mb[1] = loop_id;
3781 mcp->mb[2] = BIT_0;
3782 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3783 mcp->mb[9] = vha->vp_idx;
3784 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3785 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3786 mcp->tov = MBX_TOV_SECONDS;
3787 mcp->flags = 0;
3788 rval = qla2x00_mailbox_command(vha, mcp);
3790 /* Return mailbox statuses. */
3791 if (mb != NULL) {
3792 mb[0] = mcp->mb[0];
3793 mb[1] = mcp->mb[1];
3794 mb[3] = mcp->mb[3];
3797 if (rval != QLA_SUCCESS) {
3798 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3799 "Failed=%x.\n", rval);
3800 } else {
3801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3802 "Done %s.\n", __func__);
3805 return rval;
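/*
 * qla24xx_report_id_acquisition
 *	Handle a Report-ID-Acquisition IOCB from the response queue.
 *	Format 0 describes loop topology, format 1 per-VP fabric/N2N
 *	acquisition and format 2 N2N (direct connect); the routine updates
 *	the current topology, host map and dpc flags accordingly.  Since
 *	it runs off the response queue, per-VP configuration is deferred
 *	to the dpc thread.
 */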
3808 void
3809 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3810 struct vp_rpt_id_entry_24xx *rptid_entry)
3812 struct qla_hw_data *ha = vha->hw;
3813 scsi_qla_host_t *vp = NULL;
3814 unsigned long flags;
3815 int found;
3816 port_id_t id;
3817 struct fc_port *fcport;
3819 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3820 "Entered %s.\n", __func__);
3822 if (rptid_entry->entry_status != 0)
3823 return;
3825 id.b.domain = rptid_entry->port_id[2];
3826 id.b.area = rptid_entry->port_id[1];
3827 id.b.al_pa = rptid_entry->port_id[0];
3828 id.b.rsvd_1 = 0;
3829 ha->flags.n2n_ae = 0;
3831 if (rptid_entry->format == 0) {
3832 /* loop */
3833 ql_dbg(ql_dbg_async, vha, 0x10b7,
3834 "Format 0 : Number of VPs setup %d, number of "
3835 "VPs acquired %d.\n", rptid_entry->vp_setup,
3836 rptid_entry->vp_acquired);
3837 ql_dbg(ql_dbg_async, vha, 0x10b8,
3838 "Primary port id %02x%02x%02x.\n",
3839 rptid_entry->port_id[2], rptid_entry->port_id[1],
3840 rptid_entry->port_id[0]);
3841 ha->current_topology = ISP_CFG_NL;
3842 qlt_update_host_map(vha, id);
3844 } else if (rptid_entry->format == 1) {
3845 /* fabric */
3846 ql_dbg(ql_dbg_async, vha, 0x10b9,
3847 "Format 1: VP[%d] enabled - status %d - with "
3848 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3849 rptid_entry->vp_status,
3850 rptid_entry->port_id[2], rptid_entry->port_id[1],
3851 rptid_entry->port_id[0]);
3852 ql_dbg(ql_dbg_async, vha, 0x5075,
3853 "Format 1: Remote WWPN %8phC.\n",
3854 rptid_entry->u.f1.port_name);
3856 ql_dbg(ql_dbg_async, vha, 0x5075,
3857 "Format 1: WWPN %8phC.\n",
3858 vha->port_name);
3860 switch (rptid_entry->u.f1.flags & TOPO_MASK) {
3861 case TOPO_N2N:
3862 ha->current_topology = ISP_CFG_N;
3863 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3864 fcport = qla2x00_find_fcport_by_wwpn(vha,
3865 rptid_entry->u.f1.port_name, 1);
3866 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3868 if (fcport) {
3869 fcport->plogi_nack_done_deadline = jiffies + HZ;
3870 fcport->dm_login_expire = jiffies + 3*HZ;
3871 fcport->scan_state = QLA_FCPORT_FOUND;
3872 switch (fcport->disc_state) {
3873 case DSC_DELETED:
3874 set_bit(RELOGIN_NEEDED,
3875 &vha->dpc_flags);
3876 break;
3877 case DSC_DELETE_PEND:
3878 break;
3879 default:
3880 qlt_schedule_sess_for_deletion(fcport);
3881 break;
3883 } else {
3884 id.b24 = 0;
3885 if (wwn_to_u64(vha->port_name) >
3886 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3887 vha->d_id.b24 = 0;
3888 vha->d_id.b.al_pa = 1;
3889 ha->flags.n2n_bigger = 1;
3891 id.b.al_pa = 2;
3892 ql_dbg(ql_dbg_async, vha, 0x5075,
3893 "Format 1: assign local id %x remote id %x\n",
3894 vha->d_id.b24, id.b24);
3895 } else {
3896 ql_dbg(ql_dbg_async, vha, 0x5075,
3897 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3898 rptid_entry->u.f1.port_name);
3899 ha->flags.n2n_bigger = 0;
3901 qla24xx_post_newsess_work(vha, &id,
3902 rptid_entry->u.f1.port_name,
3903 rptid_entry->u.f1.node_name,
3904 NULL,
3905 FC4_TYPE_UNKNOWN);
3908 /* If our portname is higher, then initiate N2N login. */
3910 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3911 ha->flags.n2n_ae = 1;
3912 return;
3913 break;
3914 case TOPO_FL:
3915 ha->current_topology = ISP_CFG_FL;
3916 break;
3917 case TOPO_F:
3918 ha->current_topology = ISP_CFG_F;
3919 break;
3920 default:
3921 break;
3924 ha->flags.gpsc_supported = 1;
3925 ha->current_topology = ISP_CFG_F;
3926 /* buffer to buffer credit flag */
3927 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3929 if (rptid_entry->vp_idx == 0) {
3930 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3931 /* FA-WWN is only for physical port */
3932 if (qla_ini_mode_enabled(vha) &&
3933 ha->flags.fawwpn_enabled &&
3934 (rptid_entry->u.f1.flags &
3935 BIT_6)) {
3936 memcpy(vha->port_name,
3937 rptid_entry->u.f1.port_name,
3938 WWN_SIZE);
3941 qlt_update_host_map(vha, id);
3944 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3945 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3946 } else {
3947 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3948 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3949 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3950 "Could not acquire ID for VP[%d].\n",
3951 rptid_entry->vp_idx);
3952 return;
3955 found = 0;
3956 spin_lock_irqsave(&ha->vport_slock, flags);
3957 list_for_each_entry(vp, &ha->vp_list, list) {
3958 if (rptid_entry->vp_idx == vp->vp_idx) {
3959 found = 1;
3960 break;
3963 spin_unlock_irqrestore(&ha->vport_slock, flags);
3965 if (!found)
3966 return;
3968 qlt_update_host_map(vp, id);
3971 /* Cannot configure here as we are still sitting on the
3972 * response queue. Handle it in dpc context. */
3974 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3975 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3976 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3978 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3979 qla2xxx_wake_dpc(vha);
3980 } else if (rptid_entry->format == 2) {
3981 ql_dbg(ql_dbg_async, vha, 0x505f,
3982 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3983 rptid_entry->port_id[2], rptid_entry->port_id[1],
3984 rptid_entry->port_id[0]);
3986 ql_dbg(ql_dbg_async, vha, 0x5075,
3987 "N2N: Remote WWPN %8phC.\n",
3988 rptid_entry->u.f2.port_name);
3990 /* N2N. direct connect */
3991 ha->current_topology = ISP_CFG_N;
3992 ha->flags.rida_fmt2 = 1;
3993 vha->d_id.b.domain = rptid_entry->port_id[2];
3994 vha->d_id.b.area = rptid_entry->port_id[1];
3995 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3997 ha->flags.n2n_ae = 1;
3998 spin_lock_irqsave(&ha->vport_slock, flags);
3999 qlt_update_vp_map(vha, SET_AL_PA);
4000 spin_unlock_irqrestore(&ha->vport_slock, flags);
4002 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4003 fcport->scan_state = QLA_FCPORT_SCAN;
4006 fcport = qla2x00_find_fcport_by_wwpn(vha,
4007 rptid_entry->u.f2.port_name, 1);
4009 if (fcport) {
4010 fcport->login_retry = vha->hw->login_retry_count;
4011 fcport->plogi_nack_done_deadline = jiffies + HZ;
4012 fcport->scan_state = QLA_FCPORT_FOUND;
4018 * qla24xx_modify_vp_config
4019 * Change VP configuration for vha
4021 * Input:
4022 * vha = adapter block pointer.
4024 * Returns:
4025 * qla2xxx local function return status code.
4027 * Context:
4028 * Kernel context.
4031 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
4033 int rval;
4034 struct vp_config_entry_24xx *vpmod;
4035 dma_addr_t vpmod_dma;
4036 struct qla_hw_data *ha = vha->hw;
4037 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4039 /* This can be called by the parent */
4041 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
4042 "Entered %s.\n", __func__);
4044 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4045 if (!vpmod) {
4046 ql_log(ql_log_warn, vha, 0x10bc,
4047 "Failed to allocate modify VP IOCB.\n");
4048 return QLA_MEMORY_ALLOC_FAILED;
4051 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
4052 vpmod->entry_count = 1;
4053 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
4054 vpmod->vp_count = 1;
4055 vpmod->vp_index1 = vha->vp_idx;
4056 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
4058 qlt_modify_vp_config(vha, vpmod);
4060 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
4061 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
4062 vpmod->entry_count = 1;
4064 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
4065 if (rval != QLA_SUCCESS) {
4066 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
4067 "Failed to issue VP config IOCB (%x).\n", rval);
4068 } else if (vpmod->comp_status != 0) {
4069 ql_dbg(ql_dbg_mbx, vha, 0x10be,
4070 "Failed to complete IOCB -- error status (%x).\n",
4071 vpmod->comp_status);
4072 rval = QLA_FUNCTION_FAILED;
4073 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
4074 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
4075 "Failed to complete IOCB -- completion status (%x).\n",
4076 le16_to_cpu(vpmod->comp_status));
4077 rval = QLA_FUNCTION_FAILED;
4078 } else {
4079 /* EMPTY */
4080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
4081 "Done %s.\n", __func__);
4082 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
4084 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4086 return rval;
4090 * qla2x00_send_change_request
4091 * Receive or disable RSCN request from fabric controller
4093 * Input:
4094 * ha = adapter block pointer
4095 * format = registration format:
4096 * 0 - Reserved
4097 * 1 - Fabric detected registration
4098 * 2 - N_port detected registration
4099 * 3 - Full registration
4100 * FF - clear registration
4101 * vp_idx = Virtual port index
4103 * Returns:
4104 * qla2x00 local function return status code.
4106 * Context:
4107 * Kernel Context
4111 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4112 uint16_t vp_idx)
4114 int rval;
4115 mbx_cmd_t mc;
4116 mbx_cmd_t *mcp = &mc;
4118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4119 "Entered %s.\n", __func__);
4121 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4122 mcp->mb[1] = format;
4123 mcp->mb[9] = vp_idx;
4124 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4125 mcp->in_mb = MBX_0|MBX_1;
4126 mcp->tov = MBX_TOV_SECONDS;
4127 mcp->flags = 0;
4128 rval = qla2x00_mailbox_command(vha, mcp);
4130 if (rval == QLA_SUCCESS) {
4131 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4132 rval = BIT_1;
4134 } else
4135 rval = BIT_1;
4137 return rval;
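/*
 * qla2x00_dump_ram
 *	Dump RISC RAM starting at 'addr' into the supplied DMA buffer,
 *	using MBC_DUMP_RISC_RAM_EXTENDED when the address needs more than
 *	16 bits or the adapter is FWI2-capable, and MBC_DUMP_RISC_RAM
 *	otherwise.
 */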
4141 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4142 uint32_t size)
4144 int rval;
4145 mbx_cmd_t mc;
4146 mbx_cmd_t *mcp = &mc;
4148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4149 "Entered %s.\n", __func__);
4151 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4152 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4153 mcp->mb[8] = MSW(addr);
4154 mcp->out_mb = MBX_8|MBX_0;
4155 } else {
4156 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4157 mcp->out_mb = MBX_0;
4159 mcp->mb[1] = LSW(addr);
4160 mcp->mb[2] = MSW(req_dma);
4161 mcp->mb[3] = LSW(req_dma);
4162 mcp->mb[6] = MSW(MSD(req_dma));
4163 mcp->mb[7] = LSW(MSD(req_dma));
4164 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4165 if (IS_FWI2_CAPABLE(vha->hw)) {
4166 mcp->mb[4] = MSW(size);
4167 mcp->mb[5] = LSW(size);
4168 mcp->out_mb |= MBX_5|MBX_4;
4169 } else {
4170 mcp->mb[4] = LSW(size);
4171 mcp->out_mb |= MBX_4;
4174 mcp->in_mb = MBX_0;
4175 mcp->tov = MBX_TOV_SECONDS;
4176 mcp->flags = 0;
4177 rval = qla2x00_mailbox_command(vha, mcp);
4179 if (rval != QLA_SUCCESS) {
4180 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4181 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4182 } else {
4183 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4184 "Done %s.\n", __func__);
4187 return rval;
4189 /* 84XX Support **************************************************************/
4191 struct cs84xx_mgmt_cmd {
4192 union {
4193 struct verify_chip_entry_84xx req;
4194 struct verify_chip_rsp_84xx rsp;
4195 } p;
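/*
 * qla84xx_verify_chip
 *	Issue a Verify Chip IOCB to the CS84xx (Menlo) device, optionally
 *	forcing a firmware update, and retry once without the update if
 *	the first attempt does not complete.  Completion and failure codes
 *	are returned through 'status'; on success the operational firmware
 *	version is recorded.
 */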
4199 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4201 int rval, retry;
4202 struct cs84xx_mgmt_cmd *mn;
4203 dma_addr_t mn_dma;
4204 uint16_t options;
4205 unsigned long flags;
4206 struct qla_hw_data *ha = vha->hw;
4208 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4209 "Entered %s.\n", __func__);
4211 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4212 if (mn == NULL) {
4213 return QLA_MEMORY_ALLOC_FAILED;
4216 /* Force Update? */
4217 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4218 /* Diagnostic firmware? */
4219 /* options |= MENLO_DIAG_FW; */
4220 /* We update the firmware with only one data sequence. */
4221 options |= VCO_END_OF_DATA;
4223 do {
4224 retry = 0;
4225 memset(mn, 0, sizeof(*mn));
4226 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4227 mn->p.req.entry_count = 1;
4228 mn->p.req.options = cpu_to_le16(options);
4230 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4231 "Dump of Verify Request.\n");
4232 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4233 (uint8_t *)mn, sizeof(*mn));
4235 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4236 if (rval != QLA_SUCCESS) {
4237 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4238 "Failed to issue verify IOCB (%x).\n", rval);
4239 goto verify_done;
4242 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4243 "Dump of Verify Response.\n");
4244 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4245 (uint8_t *)mn, sizeof(*mn));
4247 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4248 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4249 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4251 "cs=%x fc=%x.\n", status[0], status[1]);
4253 if (status[0] != CS_COMPLETE) {
4254 rval = QLA_FUNCTION_FAILED;
4255 if (!(options & VCO_DONT_UPDATE_FW)) {
4256 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4257 "Firmware update failed. Retrying "
4258 "without update firmware.\n");
4259 options |= VCO_DONT_UPDATE_FW;
4260 options &= ~VCO_FORCE_UPDATE;
4261 retry = 1;
4263 } else {
4264 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4265 "Firmware updated to %x.\n",
4266 le32_to_cpu(mn->p.rsp.fw_ver));
4268 /* NOTE: we only update OP firmware. */
4269 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4270 ha->cs84xx->op_fw_version =
4271 le32_to_cpu(mn->p.rsp.fw_ver);
4272 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4273 flags);
4275 } while (retry);
4277 verify_done:
4278 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4280 if (rval != QLA_SUCCESS) {
4281 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4282 "Failed=%x.\n", rval);
4283 } else {
4284 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4285 "Done %s.\n", __func__);
4288 return rval;
4292 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4294 int rval;
4295 unsigned long flags;
4296 mbx_cmd_t mc;
4297 mbx_cmd_t *mcp = &mc;
4298 struct qla_hw_data *ha = vha->hw;
4300 if (!ha->flags.fw_started)
4301 return QLA_SUCCESS;
4303 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4304 "Entered %s.\n", __func__);
4306 if (IS_SHADOW_REG_CAPABLE(ha))
4307 req->options |= BIT_13;
4309 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4310 mcp->mb[1] = req->options;
4311 mcp->mb[2] = MSW(LSD(req->dma));
4312 mcp->mb[3] = LSW(LSD(req->dma));
4313 mcp->mb[6] = MSW(MSD(req->dma));
4314 mcp->mb[7] = LSW(MSD(req->dma));
4315 mcp->mb[5] = req->length;
4316 if (req->rsp)
4317 mcp->mb[10] = req->rsp->id;
4318 mcp->mb[12] = req->qos;
4319 mcp->mb[11] = req->vp_idx;
4320 mcp->mb[13] = req->rid;
4321 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4322 mcp->mb[15] = 0;
4324 mcp->mb[4] = req->id;
4325 /* que in ptr index */
4326 mcp->mb[8] = 0;
4327 /* que out ptr index */
4328 mcp->mb[9] = *req->out_ptr = 0;
4329 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4330 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4331 mcp->in_mb = MBX_0;
4332 mcp->flags = MBX_DMA_OUT;
4333 mcp->tov = MBX_TOV_SECONDS * 2;
4335 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4336 mcp->in_mb |= MBX_1;
4337 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4338 mcp->out_mb |= MBX_15;
4339 /* debug q create issue in SR-IOV */
4340 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4343 spin_lock_irqsave(&ha->hardware_lock, flags);
4344 if (!(req->options & BIT_0)) {
4345 WRT_REG_DWORD(req->req_q_in, 0);
4346 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4347 WRT_REG_DWORD(req->req_q_out, 0);
4349 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4351 rval = qla2x00_mailbox_command(vha, mcp);
4352 if (rval != QLA_SUCCESS) {
4353 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4354 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4355 } else {
4356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4357 "Done %s.\n", __func__);
4360 return rval;
4364 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4366 int rval;
4367 unsigned long flags;
4368 mbx_cmd_t mc;
4369 mbx_cmd_t *mcp = &mc;
4370 struct qla_hw_data *ha = vha->hw;
4372 if (!ha->flags.fw_started)
4373 return QLA_SUCCESS;
4375 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4376 "Entered %s.\n", __func__);
4378 if (IS_SHADOW_REG_CAPABLE(ha))
4379 rsp->options |= BIT_13;
4381 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4382 mcp->mb[1] = rsp->options;
4383 mcp->mb[2] = MSW(LSD(rsp->dma));
4384 mcp->mb[3] = LSW(LSD(rsp->dma));
4385 mcp->mb[6] = MSW(MSD(rsp->dma));
4386 mcp->mb[7] = LSW(MSD(rsp->dma));
4387 mcp->mb[5] = rsp->length;
4388 mcp->mb[14] = rsp->msix->entry;
4389 mcp->mb[13] = rsp->rid;
4390 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4391 mcp->mb[15] = 0;
4393 mcp->mb[4] = rsp->id;
4394 /* que in ptr index */
4395 mcp->mb[8] = *rsp->in_ptr = 0;
4396 /* que out ptr index */
4397 mcp->mb[9] = 0;
4398 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4399 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4400 mcp->in_mb = MBX_0;
4401 mcp->flags = MBX_DMA_OUT;
4402 mcp->tov = MBX_TOV_SECONDS * 2;
4404 if (IS_QLA81XX(ha)) {
4405 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4406 mcp->in_mb |= MBX_1;
4407 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4408 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4409 mcp->in_mb |= MBX_1;
4410 /* debug q create issue in SR-IOV */
4411 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4414 spin_lock_irqsave(&ha->hardware_lock, flags);
4415 if (!(rsp->options & BIT_0)) {
4416 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4417 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4418 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4421 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4423 rval = qla2x00_mailbox_command(vha, mcp);
4424 if (rval != QLA_SUCCESS) {
4425 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4426 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4427 } else {
4428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4429 "Done %s.\n", __func__);
4432 return rval;
4436 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4438 int rval;
4439 mbx_cmd_t mc;
4440 mbx_cmd_t *mcp = &mc;
4442 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4443 "Entered %s.\n", __func__);
4445 mcp->mb[0] = MBC_IDC_ACK;
4446 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4447 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4448 mcp->in_mb = MBX_0;
4449 mcp->tov = MBX_TOV_SECONDS;
4450 mcp->flags = 0;
4451 rval = qla2x00_mailbox_command(vha, mcp);
4453 if (rval != QLA_SUCCESS) {
4454 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4455 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4456 } else {
4457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4458 "Done %s.\n", __func__);
4461 return rval;
4465 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4467 int rval;
4468 mbx_cmd_t mc;
4469 mbx_cmd_t *mcp = &mc;
4471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4472 "Entered %s.\n", __func__);
4474 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4475 !IS_QLA27XX(vha->hw))
4476 return QLA_FUNCTION_FAILED;
4478 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4479 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4480 mcp->out_mb = MBX_1|MBX_0;
4481 mcp->in_mb = MBX_1|MBX_0;
4482 mcp->tov = MBX_TOV_SECONDS;
4483 mcp->flags = 0;
4484 rval = qla2x00_mailbox_command(vha, mcp);
4486 if (rval != QLA_SUCCESS) {
4487 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4488 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4489 rval, mcp->mb[0], mcp->mb[1]);
4490 } else {
4491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4492 "Done %s.\n", __func__);
4493 *sector_size = mcp->mb[1];
4496 return rval;
4500 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4502 int rval;
4503 mbx_cmd_t mc;
4504 mbx_cmd_t *mcp = &mc;
4506 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4507 !IS_QLA27XX(vha->hw))
4508 return QLA_FUNCTION_FAILED;
4510 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4511 "Entered %s.\n", __func__);
4513 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4514 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4515 FAC_OPT_CMD_WRITE_PROTECT;
4516 mcp->out_mb = MBX_1|MBX_0;
4517 mcp->in_mb = MBX_1|MBX_0;
4518 mcp->tov = MBX_TOV_SECONDS;
4519 mcp->flags = 0;
4520 rval = qla2x00_mailbox_command(vha, mcp);
4522 if (rval != QLA_SUCCESS) {
4523 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4524 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4525 rval, mcp->mb[0], mcp->mb[1]);
4526 } else {
4527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4528 "Done %s.\n", __func__);
4531 return rval;
4535 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4537 int rval;
4538 mbx_cmd_t mc;
4539 mbx_cmd_t *mcp = &mc;
4541 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4542 !IS_QLA27XX(vha->hw))
4543 return QLA_FUNCTION_FAILED;
4545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4546 "Entered %s.\n", __func__);
4548 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4549 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4550 mcp->mb[2] = LSW(start);
4551 mcp->mb[3] = MSW(start);
4552 mcp->mb[4] = LSW(finish);
4553 mcp->mb[5] = MSW(finish);
4554 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4555 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4556 mcp->tov = MBX_TOV_SECONDS;
4557 mcp->flags = 0;
4558 rval = qla2x00_mailbox_command(vha, mcp);
4560 if (rval != QLA_SUCCESS) {
4561 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4562 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4563 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4564 } else {
4565 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4566 "Done %s.\n", __func__);
4569 return rval;
4573 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4575 int rval = 0;
4576 mbx_cmd_t mc;
4577 mbx_cmd_t *mcp = &mc;
4579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4580 "Entered %s.\n", __func__);
4582 mcp->mb[0] = MBC_RESTART_MPI_FW;
4583 mcp->out_mb = MBX_0;
4584 mcp->in_mb = MBX_0|MBX_1;
4585 mcp->tov = MBX_TOV_SECONDS;
4586 mcp->flags = 0;
4587 rval = qla2x00_mailbox_command(vha, mcp);
4589 if (rval != QLA_SUCCESS) {
4590 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4591 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4592 rval, mcp->mb[0], mcp->mb[1]);
4593 } else {
4594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4595 "Done %s.\n", __func__);
4598 return rval;
4602 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4604 int rval;
4605 mbx_cmd_t mc;
4606 mbx_cmd_t *mcp = &mc;
4607 int i;
4608 int len;
4609 uint16_t *str;
4610 struct qla_hw_data *ha = vha->hw;
4612 if (!IS_P3P_TYPE(ha))
4613 return QLA_FUNCTION_FAILED;
4615 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4616 "Entered %s.\n", __func__);
4618 str = (void *)version;
4619 len = strlen(version);
4621 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4622 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4623 mcp->out_mb = MBX_1|MBX_0;
4624 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4625 mcp->mb[i] = cpu_to_le16p(str);
4626 mcp->out_mb |= 1<<i;
4628 for (; i < 16; i++) {
4629 mcp->mb[i] = 0;
4630 mcp->out_mb |= 1<<i;
4632 mcp->in_mb = MBX_1|MBX_0;
4633 mcp->tov = MBX_TOV_SECONDS;
4634 mcp->flags = 0;
4635 rval = qla2x00_mailbox_command(vha, mcp);
4637 if (rval != QLA_SUCCESS) {
4638 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4639 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4640 } else {
4641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4642 "Done %s.\n", __func__);
4645 return rval;
4649 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4651 int rval;
4652 mbx_cmd_t mc;
4653 mbx_cmd_t *mcp = &mc;
4654 int len;
4655 uint16_t dwlen;
4656 uint8_t *str;
4657 dma_addr_t str_dma;
4658 struct qla_hw_data *ha = vha->hw;
4660 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4661 IS_P3P_TYPE(ha))
4662 return QLA_FUNCTION_FAILED;
4664 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4665 "Entered %s.\n", __func__);
4667 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4668 if (!str) {
4669 ql_log(ql_log_warn, vha, 0x117f,
4670 "Failed to allocate driver version param.\n");
4671 return QLA_MEMORY_ALLOC_FAILED;
4674 memcpy(str, "\x7\x3\x11\x0", 4);
4675 dwlen = str[0];
4676 len = dwlen * 4 - 4;
4677 memset(str + 4, 0, len);
4678 if (len > strlen(version))
4679 len = strlen(version);
4680 memcpy(str + 4, version, len);
4682 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4683 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4684 mcp->mb[2] = MSW(LSD(str_dma));
4685 mcp->mb[3] = LSW(LSD(str_dma));
4686 mcp->mb[6] = MSW(MSD(str_dma));
4687 mcp->mb[7] = LSW(MSD(str_dma));
4688 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4689 mcp->in_mb = MBX_1|MBX_0;
4690 mcp->tov = MBX_TOV_SECONDS;
4691 mcp->flags = 0;
4692 rval = qla2x00_mailbox_command(vha, mcp);
4694 if (rval != QLA_SUCCESS) {
4695 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4696 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4697 } else {
4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4699 "Done %s.\n", __func__);
4702 dma_pool_free(ha->s_dma_pool, str, str_dma);
4704 return rval;
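/*
 * Arithmetic note on the staging buffer above: the first header byte is the
 * buffer length in dwords, so dwlen = 0x7 means 7 * 4 = 28 bytes total:
 * 4 header bytes ("\x7\x3\x11\x0") plus len = 7 * 4 - 4 = 24 bytes for the
 * driver version string; longer strings are truncated to fit.
 */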
4708 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4709 void *buf, uint16_t bufsiz)
4711 int rval, i;
4712 mbx_cmd_t mc;
4713 mbx_cmd_t *mcp = &mc;
4714 uint32_t *bp;
4716 if (!IS_FWI2_CAPABLE(vha->hw))
4717 return QLA_FUNCTION_FAILED;
4719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4720 "Entered %s.\n", __func__);
4722 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4723 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4724 mcp->mb[2] = MSW(buf_dma);
4725 mcp->mb[3] = LSW(buf_dma);
4726 mcp->mb[6] = MSW(MSD(buf_dma));
4727 mcp->mb[7] = LSW(MSD(buf_dma));
4728 mcp->mb[8] = bufsiz/4;
4729 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4730 mcp->in_mb = MBX_1|MBX_0;
4731 mcp->tov = MBX_TOV_SECONDS;
4732 mcp->flags = 0;
4733 rval = qla2x00_mailbox_command(vha, mcp);
4735 if (rval != QLA_SUCCESS) {
4736 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4737 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4738 } else {
4739 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4740 "Done %s.\n", __func__);
4741 bp = (uint32_t *) buf;
4742 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4743 *bp = le32_to_cpu(*bp);
4746 return rval;
4749 static int
4750 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4752 int rval;
4753 mbx_cmd_t mc;
4754 mbx_cmd_t *mcp = &mc;
4756 if (!IS_FWI2_CAPABLE(vha->hw))
4757 return QLA_FUNCTION_FAILED;
4759 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4760 "Entered %s.\n", __func__);
4762 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4763 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4764 mcp->out_mb = MBX_1|MBX_0;
4765 mcp->in_mb = MBX_1|MBX_0;
4766 mcp->tov = MBX_TOV_SECONDS;
4767 mcp->flags = 0;
4768 rval = qla2x00_mailbox_command(vha, mcp);
4769 *temp = mcp->mb[1];
4771 if (rval != QLA_SUCCESS) {
4772 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4773 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4774 } else {
4775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4776 "Done %s.\n", __func__);
4779 return rval;
4783 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4784 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4786 int rval;
4787 mbx_cmd_t mc;
4788 mbx_cmd_t *mcp = &mc;
4789 struct qla_hw_data *ha = vha->hw;
4791 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4792 "Entered %s.\n", __func__);
4794 if (!IS_FWI2_CAPABLE(ha))
4795 return QLA_FUNCTION_FAILED;
4797 if (len == 1)
4798 opt |= BIT_0;
4800 mcp->mb[0] = MBC_READ_SFP;
4801 mcp->mb[1] = dev;
4802 mcp->mb[2] = MSW(sfp_dma);
4803 mcp->mb[3] = LSW(sfp_dma);
4804 mcp->mb[6] = MSW(MSD(sfp_dma));
4805 mcp->mb[7] = LSW(MSD(sfp_dma));
4806 mcp->mb[8] = len;
4807 mcp->mb[9] = off;
4808 mcp->mb[10] = opt;
4809 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4810 mcp->in_mb = MBX_1|MBX_0;
4811 mcp->tov = MBX_TOV_SECONDS;
4812 mcp->flags = 0;
4813 rval = qla2x00_mailbox_command(vha, mcp);
4815 if (opt & BIT_0)
4816 *sfp = mcp->mb[1];
4818 if (rval != QLA_SUCCESS) {
4819 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4820 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4821 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4822 mcp->mb[1] == 0x22)
4823 /* sfp is not there */
4824 rval = QLA_INTERFACE_ERROR;
4825 } else {
4826 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4827 "Done %s.\n", __func__);
4830 return rval;
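/*
 * Hypothetical helper (not part of the driver): read one sensor byte with
 * no DMA buffer, modeled on the thermal-sensor read later in this file.
 * With len == 1 the routine sets BIT_0 and the byte is returned inline in
 * mb[1]; the device (0x98) and offset (0x1) values are illustrative.
 */
static int example_read_sensor_byte(scsi_qla_host_t *vha, uint8_t *byte)
{
	return qla2x00_read_sfp(vha, 0, byte, 0x98, 0x1, 1, BIT_13 | BIT_0);
}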
4834 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4835 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4837 int rval;
4838 mbx_cmd_t mc;
4839 mbx_cmd_t *mcp = &mc;
4840 struct qla_hw_data *ha = vha->hw;
4842 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4843 "Entered %s.\n", __func__);
4845 if (!IS_FWI2_CAPABLE(ha))
4846 return QLA_FUNCTION_FAILED;
4848 if (len == 1)
4849 opt |= BIT_0;
4851 if (opt & BIT_0)
4852 len = *sfp;
4854 mcp->mb[0] = MBC_WRITE_SFP;
4855 mcp->mb[1] = dev;
4856 mcp->mb[2] = MSW(sfp_dma);
4857 mcp->mb[3] = LSW(sfp_dma);
4858 mcp->mb[6] = MSW(MSD(sfp_dma));
4859 mcp->mb[7] = LSW(MSD(sfp_dma));
4860 mcp->mb[8] = len;
4861 mcp->mb[9] = off;
4862 mcp->mb[10] = opt;
4863 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4864 mcp->in_mb = MBX_1|MBX_0;
4865 mcp->tov = MBX_TOV_SECONDS;
4866 mcp->flags = 0;
4867 rval = qla2x00_mailbox_command(vha, mcp);
4869 if (rval != QLA_SUCCESS) {
4870 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4871 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4872 } else {
4873 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4874 "Done %s.\n", __func__);
4877 return rval;
4881 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4882 uint16_t size_in_bytes, uint16_t *actual_size)
4884 int rval;
4885 mbx_cmd_t mc;
4886 mbx_cmd_t *mcp = &mc;
4888 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4889 "Entered %s.\n", __func__);
4891 if (!IS_CNA_CAPABLE(vha->hw))
4892 return QLA_FUNCTION_FAILED;
4894 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4895 mcp->mb[2] = MSW(stats_dma);
4896 mcp->mb[3] = LSW(stats_dma);
4897 mcp->mb[6] = MSW(MSD(stats_dma));
4898 mcp->mb[7] = LSW(MSD(stats_dma));
4899 mcp->mb[8] = size_in_bytes >> 2;
4900 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4901 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4902 mcp->tov = MBX_TOV_SECONDS;
4903 mcp->flags = 0;
4904 rval = qla2x00_mailbox_command(vha, mcp);
4906 if (rval != QLA_SUCCESS) {
4907 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4908 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4909 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4910 } else {
4911 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4912 "Done %s.\n", __func__);
4915 *actual_size = mcp->mb[2] << 2;
4918 return rval;
4922 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4923 uint16_t size)
4925 int rval;
4926 mbx_cmd_t mc;
4927 mbx_cmd_t *mcp = &mc;
4929 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4930 "Entered %s.\n", __func__);
4932 if (!IS_CNA_CAPABLE(vha->hw))
4933 return QLA_FUNCTION_FAILED;
4935 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4936 mcp->mb[1] = 0;
4937 mcp->mb[2] = MSW(tlv_dma);
4938 mcp->mb[3] = LSW(tlv_dma);
4939 mcp->mb[6] = MSW(MSD(tlv_dma));
4940 mcp->mb[7] = LSW(MSD(tlv_dma));
4941 mcp->mb[8] = size;
4942 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4943 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4944 mcp->tov = MBX_TOV_SECONDS;
4945 mcp->flags = 0;
4946 rval = qla2x00_mailbox_command(vha, mcp);
4948 if (rval != QLA_SUCCESS) {
4949 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4950 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4951 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4952 } else {
4953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4954 "Done %s.\n", __func__);
4957 return rval;
4961 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4963 int rval;
4964 mbx_cmd_t mc;
4965 mbx_cmd_t *mcp = &mc;
4967 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4968 "Entered %s.\n", __func__);
4970 if (!IS_FWI2_CAPABLE(vha->hw))
4971 return QLA_FUNCTION_FAILED;
4973 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4974 mcp->mb[1] = LSW(risc_addr);
4975 mcp->mb[8] = MSW(risc_addr);
4976 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4977 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4978 mcp->tov = 30;
4979 mcp->flags = 0;
4980 rval = qla2x00_mailbox_command(vha, mcp);
4981 if (rval != QLA_SUCCESS) {
4982 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4983 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4984 } else {
4985 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4986 "Done %s.\n", __func__);
4987 *data = mcp->mb[3] << 16 | mcp->mb[2];
4990 return rval;
4994 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4995 uint16_t *mresp)
4997 int rval;
4998 mbx_cmd_t mc;
4999 mbx_cmd_t *mcp = &mc;
5001 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
5002 "Entered %s.\n", __func__);
5004 memset(mcp->mb, 0 , sizeof(mcp->mb));
5005 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
5006 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
5008 /* transfer count */
5009 mcp->mb[10] = LSW(mreq->transfer_size);
5010 mcp->mb[11] = MSW(mreq->transfer_size);
5012 /* send data address */
5013 mcp->mb[14] = LSW(mreq->send_dma);
5014 mcp->mb[15] = MSW(mreq->send_dma);
5015 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5016 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5018 /* receive data address */
5019 mcp->mb[16] = LSW(mreq->rcv_dma);
5020 mcp->mb[17] = MSW(mreq->rcv_dma);
5021 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5022 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5024 /* Iteration count */
5025 mcp->mb[18] = LSW(mreq->iteration_count);
5026 mcp->mb[19] = MSW(mreq->iteration_count);
5028 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
5029 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5030 if (IS_CNA_CAPABLE(vha->hw))
5031 mcp->out_mb |= MBX_2;
5032 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
5034 mcp->buf_size = mreq->transfer_size;
5035 mcp->tov = MBX_TOV_SECONDS;
5036 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5038 rval = qla2x00_mailbox_command(vha, mcp);
5040 if (rval != QLA_SUCCESS) {
5041 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
5042 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
5043 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
5044 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
5045 } else {
5046 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
5047 "Done %s.\n", __func__);
5050 /* Copy mailbox information */
5051 memcpy( mresp, mcp->mb, 64);
5052 return rval;
5056 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
5057 uint16_t *mresp)
5059 int rval;
5060 mbx_cmd_t mc;
5061 mbx_cmd_t *mcp = &mc;
5062 struct qla_hw_data *ha = vha->hw;
5064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
5065 "Entered %s.\n", __func__);
5067 memset(mcp->mb, 0 , sizeof(mcp->mb));
5068 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
5069 /* BIT_6 specifies 64bit address */
5070 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5071 if (IS_CNA_CAPABLE(ha)) {
5072 mcp->mb[2] = vha->fcoe_fcf_idx;
5074 mcp->mb[16] = LSW(mreq->rcv_dma);
5075 mcp->mb[17] = MSW(mreq->rcv_dma);
5076 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5077 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5079 mcp->mb[10] = LSW(mreq->transfer_size);
5081 mcp->mb[14] = LSW(mreq->send_dma);
5082 mcp->mb[15] = MSW(mreq->send_dma);
5083 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5084 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5086 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5087 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5088 if (IS_CNA_CAPABLE(ha))
5089 mcp->out_mb |= MBX_2;
5091 mcp->in_mb = MBX_0;
5092 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5093 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5094 mcp->in_mb |= MBX_1;
5095 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5096 mcp->in_mb |= MBX_3;
5098 mcp->tov = MBX_TOV_SECONDS;
5099 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5100 mcp->buf_size = mreq->transfer_size;
5102 rval = qla2x00_mailbox_command(vha, mcp);
5104 if (rval != QLA_SUCCESS) {
5105 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5106 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5107 rval, mcp->mb[0], mcp->mb[1]);
5108 } else {
5109 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5110 "Done %s.\n", __func__);
5113 /* Copy mailbox information */
5114 memcpy(mresp, mcp->mb, 64);
5115 return rval;
5119 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5121 int rval;
5122 mbx_cmd_t mc;
5123 mbx_cmd_t *mcp = &mc;
5125 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5126 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5128 mcp->mb[0] = MBC_ISP84XX_RESET;
5129 mcp->mb[1] = enable_diagnostic;
5130 mcp->out_mb = MBX_1|MBX_0;
5131 mcp->in_mb = MBX_1|MBX_0;
5132 mcp->tov = MBX_TOV_SECONDS;
5133 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5134 rval = qla2x00_mailbox_command(vha, mcp);
5136 if (rval != QLA_SUCCESS)
5137 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5138 else
5139 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5140 "Done %s.\n", __func__);
5142 return rval;
5146 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5148 int rval;
5149 mbx_cmd_t mc;
5150 mbx_cmd_t *mcp = &mc;
5152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5153 "Entered %s.\n", __func__);
5155 if (!IS_FWI2_CAPABLE(vha->hw))
5156 return QLA_FUNCTION_FAILED;
5158 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5159 mcp->mb[1] = LSW(risc_addr);
5160 mcp->mb[2] = LSW(data);
5161 mcp->mb[3] = MSW(data);
5162 mcp->mb[8] = MSW(risc_addr);
5163 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5164 mcp->in_mb = MBX_0;
5165 mcp->tov = 30;
5166 mcp->flags = 0;
5167 rval = qla2x00_mailbox_command(vha, mcp);
5168 if (rval != QLA_SUCCESS) {
5169 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5170 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5171 } else {
5172 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5173 "Done %s.\n", __func__);
5176 return rval;
5180 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5182 int rval;
5183 uint32_t stat, timer;
5184 uint16_t mb0 = 0;
5185 struct qla_hw_data *ha = vha->hw;
5186 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5188 rval = QLA_SUCCESS;
5190 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5191 "Entered %s.\n", __func__);
5193 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5195 /* Write the MBC data to the registers */
5196 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5197 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5198 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5199 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5200 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5202 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5204 /* Poll for MBC interrupt */
5205 for (timer = 6000000; timer; timer--) {
5206 /* Check for pending interrupts. */
5207 stat = RD_REG_DWORD(&reg->host_status);
5208 if (stat & HSRX_RISC_INT) {
5209 stat &= 0xff;
5211 if (stat == 0x1 || stat == 0x2 ||
5212 stat == 0x10 || stat == 0x11) {
5213 set_bit(MBX_INTERRUPT,
5214 &ha->mbx_cmd_flags);
5215 mb0 = RD_REG_WORD(&reg->mailbox0);
5216 WRT_REG_DWORD(&reg->hccr,
5217 HCCRX_CLR_RISC_INT);
5218 RD_REG_DWORD(&reg->hccr);
5219 break;
5222 udelay(5);
5225 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5226 rval = mb0 & MBS_MASK;
5227 else
5228 rval = QLA_FUNCTION_FAILED;
5230 if (rval != QLA_SUCCESS) {
5231 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5232 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5233 } else {
5234 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5235 "Done %s.\n", __func__);
5238 return rval;
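/*
 * Polling budget of the loop above, from simple arithmetic on its bounds:
 * 6,000,000 iterations x udelay(5) gives a worst-case window of roughly
 * 30 seconds before the MPI register write is declared failed.
 */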
5241 /* Set the specified data rate */
5243 qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
5245 int rval;
5246 mbx_cmd_t mc;
5247 mbx_cmd_t *mcp = &mc;
5248 struct qla_hw_data *ha = vha->hw;
5249 uint16_t val;
5251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5252 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5253 mode);
5255 if (!IS_FWI2_CAPABLE(ha))
5256 return QLA_FUNCTION_FAILED;
5258 memset(mcp, 0, sizeof(*mcp));
5259 switch (ha->set_data_rate) {
5260 case PORT_SPEED_AUTO:
5261 case PORT_SPEED_4GB:
5262 case PORT_SPEED_8GB:
5263 case PORT_SPEED_16GB:
5264 case PORT_SPEED_32GB:
5265 val = ha->set_data_rate;
5266 break;
5267 default:
5268 ql_log(ql_log_warn, vha, 0x1199,
5269 "Unrecognized speed setting:%d. Setting Autoneg\n",
5270 ha->set_data_rate);
5271 val = ha->set_data_rate = PORT_SPEED_AUTO;
5272 break;
5275 mcp->mb[0] = MBC_DATA_RATE;
5276 mcp->mb[1] = mode;
5277 mcp->mb[2] = val;
5279 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5280 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5281 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5282 mcp->in_mb |= MBX_4|MBX_3;
5283 mcp->tov = MBX_TOV_SECONDS;
5284 mcp->flags = 0;
5285 rval = qla2x00_mailbox_command(vha, mcp);
5286 if (rval != QLA_SUCCESS) {
5287 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5288 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5289 } else {
5290 if (mcp->mb[1] != 0x7)
5291 ql_dbg(ql_dbg_mbx, vha, 0x1179,
5292 "Speed set:0x%x\n", mcp->mb[1]);
5294 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5295 "Done %s.\n", __func__);
5298 return rval;
5302 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5304 int rval;
5305 mbx_cmd_t mc;
5306 mbx_cmd_t *mcp = &mc;
5307 struct qla_hw_data *ha = vha->hw;
5309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5310 "Entered %s.\n", __func__);
5312 if (!IS_FWI2_CAPABLE(ha))
5313 return QLA_FUNCTION_FAILED;
5315 mcp->mb[0] = MBC_DATA_RATE;
5316 mcp->mb[1] = QLA_GET_DATA_RATE;
5317 mcp->out_mb = MBX_1|MBX_0;
5318 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5319 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5320 mcp->in_mb |= MBX_3;
5321 mcp->tov = MBX_TOV_SECONDS;
5322 mcp->flags = 0;
5323 rval = qla2x00_mailbox_command(vha, mcp);
5324 if (rval != QLA_SUCCESS) {
5325 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5326 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5327 } else {
5328 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5329 "Done %s.\n", __func__);
5330 if (mcp->mb[1] != 0x7)
5331 ha->link_data_rate = mcp->mb[1];
5334 return rval;
5338 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5340 int rval;
5341 mbx_cmd_t mc;
5342 mbx_cmd_t *mcp = &mc;
5343 struct qla_hw_data *ha = vha->hw;
5345 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5346 "Entered %s.\n", __func__);
5348 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5349 !IS_QLA27XX(ha))
5350 return QLA_FUNCTION_FAILED;
5351 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5352 mcp->out_mb = MBX_0;
5353 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5354 mcp->tov = MBX_TOV_SECONDS;
5355 mcp->flags = 0;
5357 rval = qla2x00_mailbox_command(vha, mcp);
5359 if (rval != QLA_SUCCESS) {
5360 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5362 } else {
5363 /* Copy all bits to preserve original value */
5364 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5366 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5367 "Done %s.\n", __func__);
5369 return rval;
5373 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5375 int rval;
5376 mbx_cmd_t mc;
5377 mbx_cmd_t *mcp = &mc;
5379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5380 "Entered %s.\n", __func__);
5382 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5383 /* Copy all bits to preserve original setting */
5384 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5385 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5386 mcp->in_mb = MBX_0;
5387 mcp->tov = MBX_TOV_SECONDS;
5388 mcp->flags = 0;
5389 rval = qla2x00_mailbox_command(vha, mcp);
5391 if (rval != QLA_SUCCESS) {
5392 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5393 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5394 } else
5395 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5396 "Done %s.\n", __func__);
5398 return rval;
5403 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5404 uint16_t *mb)
5406 int rval;
5407 mbx_cmd_t mc;
5408 mbx_cmd_t *mcp = &mc;
5409 struct qla_hw_data *ha = vha->hw;
5411 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5412 "Entered %s.\n", __func__);
5414 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5415 return QLA_FUNCTION_FAILED;
5417 mcp->mb[0] = MBC_PORT_PARAMS;
5418 mcp->mb[1] = loop_id;
5419 if (ha->flags.fcp_prio_enabled)
5420 mcp->mb[2] = BIT_1;
5421 else
5422 mcp->mb[2] = BIT_2;
5423 mcp->mb[4] = priority & 0xf;
5424 mcp->mb[9] = vha->vp_idx;
5425 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5426 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5427 mcp->tov = 30;
5428 mcp->flags = 0;
5429 rval = qla2x00_mailbox_command(vha, mcp);
5430 if (mb != NULL) {
5431 mb[0] = mcp->mb[0];
5432 mb[1] = mcp->mb[1];
5433 mb[3] = mcp->mb[3];
5434 mb[4] = mcp->mb[4];
5437 if (rval != QLA_SUCCESS) {
5438 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5439 } else {
5440 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5441 "Done %s.\n", __func__);
5444 return rval;
5448 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5450 int rval = QLA_FUNCTION_FAILED;
5451 struct qla_hw_data *ha = vha->hw;
5452 uint8_t byte;
5454 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5455 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5456 "Thermal not supported by this card.\n");
5457 return rval;
5460 if (IS_QLA25XX(ha)) {
5461 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5462 ha->pdev->subsystem_device == 0x0175) {
5463 rval = qla2x00_read_sfp(vha, 0, &byte,
5464 0x98, 0x1, 1, BIT_13|BIT_0);
5465 *temp = byte;
5466 return rval;
5468 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5469 ha->pdev->subsystem_device == 0x338e) {
5470 rval = qla2x00_read_sfp(vha, 0, &byte,
5471 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5472 *temp = byte;
5473 return rval;
5475 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5476 "Thermal not supported by this card.\n");
5477 return rval;
5480 if (IS_QLA82XX(ha)) {
5481 *temp = qla82xx_read_temperature(vha);
5482 rval = QLA_SUCCESS;
5483 return rval;
5484 } else if (IS_QLA8044(ha)) {
5485 *temp = qla8044_read_temperature(vha);
5486 rval = QLA_SUCCESS;
5487 return rval;
5490 rval = qla2x00_read_asic_temperature(vha, temp);
5491 return rval;
5495 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5497 int rval;
5498 struct qla_hw_data *ha = vha->hw;
5499 mbx_cmd_t mc;
5500 mbx_cmd_t *mcp = &mc;
5502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5503 "Entered %s.\n", __func__);
5505 if (!IS_FWI2_CAPABLE(ha))
5506 return QLA_FUNCTION_FAILED;
5508 memset(mcp, 0, sizeof(mbx_cmd_t));
5509 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5510 mcp->mb[1] = 1;
5512 mcp->out_mb = MBX_1|MBX_0;
5513 mcp->in_mb = MBX_0;
5514 mcp->tov = 30;
5515 mcp->flags = 0;
5517 rval = qla2x00_mailbox_command(vha, mcp);
5518 if (rval != QLA_SUCCESS) {
5519 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5520 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5521 } else {
5522 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5523 "Done %s.\n", __func__);
5526 return rval;
5530 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5532 int rval;
5533 struct qla_hw_data *ha = vha->hw;
5534 mbx_cmd_t mc;
5535 mbx_cmd_t *mcp = &mc;
5537 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5538 "Entered %s.\n", __func__);
5540 if (!IS_P3P_TYPE(ha))
5541 return QLA_FUNCTION_FAILED;
5543 memset(mcp, 0, sizeof(mbx_cmd_t));
5544 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5545 mcp->mb[1] = 0;
5547 mcp->out_mb = MBX_1|MBX_0;
5548 mcp->in_mb = MBX_0;
5549 mcp->tov = 30;
5550 mcp->flags = 0;
5552 rval = qla2x00_mailbox_command(vha, mcp);
5553 if (rval != QLA_SUCCESS) {
5554 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5555 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5556 } else {
5557 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5558 "Done %s.\n", __func__);
5561 return rval;
5565 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5567 struct qla_hw_data *ha = vha->hw;
5568 mbx_cmd_t mc;
5569 mbx_cmd_t *mcp = &mc;
5570 int rval = QLA_FUNCTION_FAILED;
5572 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5573 "Entered %s.\n", __func__);
5575 memset(mcp->mb, 0 , sizeof(mcp->mb));
5576 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5577 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5578 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5579 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5581 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5582 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5583 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5585 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5586 mcp->tov = MBX_TOV_SECONDS;
5587 rval = qla2x00_mailbox_command(vha, mcp);
5589 /* Always copy back return mailbox values. */
5590 if (rval != QLA_SUCCESS) {
5591 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5592 "mailbox command FAILED=0x%x, subcode=%x.\n",
5593 (mcp->mb[1] << 16) | mcp->mb[0],
5594 (mcp->mb[3] << 16) | mcp->mb[2]);
5595 } else {
5596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5597 "Done %s.\n", __func__);
5598 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5599 if (!ha->md_template_size) {
5600 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5601 "Null template size obtained.\n");
5602 rval = QLA_FUNCTION_FAILED;
5605 return rval;
5609 qla82xx_md_get_template(scsi_qla_host_t *vha)
5611 struct qla_hw_data *ha = vha->hw;
5612 mbx_cmd_t mc;
5613 mbx_cmd_t *mcp = &mc;
5614 int rval = QLA_FUNCTION_FAILED;
5616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5617 "Entered %s.\n", __func__);
5619 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5620 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5621 if (!ha->md_tmplt_hdr) {
5622 ql_log(ql_log_warn, vha, 0x1124,
5623 "Unable to allocate memory for Minidump template.\n");
5624 return rval;
5627 memset(mcp->mb, 0 , sizeof(mcp->mb));
5628 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5629 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5630 mcp->mb[2] = LSW(RQST_TMPLT);
5631 mcp->mb[3] = MSW(RQST_TMPLT);
5632 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5633 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5634 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5635 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5636 mcp->mb[8] = LSW(ha->md_template_size);
5637 mcp->mb[9] = MSW(ha->md_template_size);
5639 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5640 mcp->tov = MBX_TOV_SECONDS;
5641 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5642 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5643 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5644 rval = qla2x00_mailbox_command(vha, mcp);
5646 if (rval != QLA_SUCCESS) {
5647 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5648 "mailbox command FAILED=0x%x, subcode=%x.\n",
5649 ((mcp->mb[1] << 16) | mcp->mb[0]),
5650 ((mcp->mb[3] << 16) | mcp->mb[2]));
5651 } else
5652 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5653 "Done %s.\n", __func__);
5654 return rval;
5658 qla8044_md_get_template(scsi_qla_host_t *vha)
5660 struct qla_hw_data *ha = vha->hw;
5661 mbx_cmd_t mc;
5662 mbx_cmd_t *mcp = &mc;
5663 int rval = QLA_FUNCTION_FAILED;
5664 int offset = 0, size = MINIDUMP_SIZE_36K;
5665 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5666 "Entered %s.\n", __func__);
5668 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5669 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5670 if (!ha->md_tmplt_hdr) {
5671 ql_log(ql_log_warn, vha, 0xb11b,
5672 "Unable to allocate memory for Minidump template.\n");
5673 return rval;
5676 memset(mcp->mb, 0 , sizeof(mcp->mb));
5677 while (offset < ha->md_template_size) {
5678 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5679 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5680 mcp->mb[2] = LSW(RQST_TMPLT);
5681 mcp->mb[3] = MSW(RQST_TMPLT);
5682 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5683 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5684 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5685 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5686 mcp->mb[8] = LSW(size);
5687 mcp->mb[9] = MSW(size);
5688 mcp->mb[10] = offset & 0x0000FFFF;
5689 mcp->mb[11] = offset & 0xFFFF0000;
5690 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5691 mcp->tov = MBX_TOV_SECONDS;
5692 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5693 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5694 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5695 rval = qla2x00_mailbox_command(vha, mcp);
5697 if (rval != QLA_SUCCESS) {
5698 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5699 "mailbox command FAILED=0x%x, subcode=%x.\n",
5700 ((mcp->mb[1] << 16) | mcp->mb[0]),
5701 ((mcp->mb[3] << 16) | mcp->mb[2]));
5702 return rval;
5703 } else
5704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5705 "Done %s.\n", __func__);
5706 offset = offset + size;
5708 return rval;
5712 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5714 int rval;
5715 struct qla_hw_data *ha = vha->hw;
5716 mbx_cmd_t mc;
5717 mbx_cmd_t *mcp = &mc;
5719 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5720 return QLA_FUNCTION_FAILED;
5722 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5723 "Entered %s.\n", __func__);
5725 memset(mcp, 0, sizeof(mbx_cmd_t));
5726 mcp->mb[0] = MBC_SET_LED_CONFIG;
5727 mcp->mb[1] = led_cfg[0];
5728 mcp->mb[2] = led_cfg[1];
5729 if (IS_QLA8031(ha)) {
5730 mcp->mb[3] = led_cfg[2];
5731 mcp->mb[4] = led_cfg[3];
5732 mcp->mb[5] = led_cfg[4];
5733 mcp->mb[6] = led_cfg[5];
5736 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5737 if (IS_QLA8031(ha))
5738 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5739 mcp->in_mb = MBX_0;
5740 mcp->tov = 30;
5741 mcp->flags = 0;
5743 rval = qla2x00_mailbox_command(vha, mcp);
5744 if (rval != QLA_SUCCESS) {
5745 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5746 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5747 } else {
5748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5749 "Done %s.\n", __func__);
5752 return rval;
5756 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5758 int rval;
5759 struct qla_hw_data *ha = vha->hw;
5760 mbx_cmd_t mc;
5761 mbx_cmd_t *mcp = &mc;
5763 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5764 return QLA_FUNCTION_FAILED;
5766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5767 "Entered %s.\n", __func__);
5769 memset(mcp, 0, sizeof(mbx_cmd_t));
5770 mcp->mb[0] = MBC_GET_LED_CONFIG;
5772 mcp->out_mb = MBX_0;
5773 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5774 if (IS_QLA8031(ha))
5775 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5776 mcp->tov = 30;
5777 mcp->flags = 0;
5779 rval = qla2x00_mailbox_command(vha, mcp);
5780 if (rval != QLA_SUCCESS) {
5781 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5782 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5783 } else {
5784 led_cfg[0] = mcp->mb[1];
5785 led_cfg[1] = mcp->mb[2];
5786 if (IS_QLA8031(ha)) {
5787 led_cfg[2] = mcp->mb[3];
5788 led_cfg[3] = mcp->mb[4];
5789 led_cfg[4] = mcp->mb[5];
5790 led_cfg[5] = mcp->mb[6];
5792 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5793 "Done %s.\n", __func__);
5796 return rval;
5800 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5802 int rval;
5803 struct qla_hw_data *ha = vha->hw;
5804 mbx_cmd_t mc;
5805 mbx_cmd_t *mcp = &mc;
5807 if (!IS_P3P_TYPE(ha))
5808 return QLA_FUNCTION_FAILED;
5810 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5811 "Entered %s.\n", __func__);
5813 memset(mcp, 0, sizeof(mbx_cmd_t));
5814 mcp->mb[0] = MBC_SET_LED_CONFIG;
5815 if (enable)
5816 mcp->mb[7] = 0xE;
5817 else
5818 mcp->mb[7] = 0xD;
5820 mcp->out_mb = MBX_7|MBX_0;
5821 mcp->in_mb = MBX_0;
5822 mcp->tov = MBX_TOV_SECONDS;
5823 mcp->flags = 0;
5825 rval = qla2x00_mailbox_command(vha, mcp);
5826 if (rval != QLA_SUCCESS) {
5827 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5828 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5829 } else {
5830 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5831 "Done %s.\n", __func__);
5834 return rval;
5838 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5840 int rval;
5841 struct qla_hw_data *ha = vha->hw;
5842 mbx_cmd_t mc;
5843 mbx_cmd_t *mcp = &mc;
5845 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5846 return QLA_FUNCTION_FAILED;
5848 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5849 "Entered %s.\n", __func__);
5851 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5852 mcp->mb[1] = LSW(reg);
5853 mcp->mb[2] = MSW(reg);
5854 mcp->mb[3] = LSW(data);
5855 mcp->mb[4] = MSW(data);
5856 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5858 mcp->in_mb = MBX_1|MBX_0;
5859 mcp->tov = MBX_TOV_SECONDS;
5860 mcp->flags = 0;
5861 rval = qla2x00_mailbox_command(vha, mcp);
5863 if (rval != QLA_SUCCESS) {
5864 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5865 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5866 } else {
5867 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5868 "Done %s.\n", __func__);
5871 return rval;
5875 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5877 int rval;
5878 struct qla_hw_data *ha = vha->hw;
5879 mbx_cmd_t mc;
5880 mbx_cmd_t *mcp = &mc;
5882 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5883 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5884 "Implicit LOGO Unsupported.\n");
5885 return QLA_FUNCTION_FAILED;
5889 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5890 "Entering %s.\n", __func__);
5892 /* Perform Implicit LOGO. */
5893 mcp->mb[0] = MBC_PORT_LOGOUT;
5894 mcp->mb[1] = fcport->loop_id;
5895 mcp->mb[10] = BIT_15;
5896 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5897 mcp->in_mb = MBX_0;
5898 mcp->tov = MBX_TOV_SECONDS;
5899 mcp->flags = 0;
5900 rval = qla2x00_mailbox_command(vha, mcp);
5901 if (rval != QLA_SUCCESS)
5902 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5903 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5904 else
5905 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5906 "Done %s.\n", __func__);
5908 return rval;
5912 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5914 int rval;
5915 mbx_cmd_t mc;
5916 mbx_cmd_t *mcp = &mc;
5917 struct qla_hw_data *ha = vha->hw;
5918 unsigned long retry_max_time = jiffies + (2 * HZ);
5920 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5921 return QLA_FUNCTION_FAILED;
5923 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5925 retry_rd_reg:
5926 mcp->mb[0] = MBC_READ_REMOTE_REG;
5927 mcp->mb[1] = LSW(reg);
5928 mcp->mb[2] = MSW(reg);
5929 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5930 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5931 mcp->tov = MBX_TOV_SECONDS;
5932 mcp->flags = 0;
5933 rval = qla2x00_mailbox_command(vha, mcp);
5935 if (rval != QLA_SUCCESS) {
5936 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5937 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5938 rval, mcp->mb[0], mcp->mb[1]);
5939 } else {
5940 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5941 if (*data == QLA8XXX_BAD_VALUE) {
5943 * During soft-reset, CAMRAM register reads might
5944 * return 0xbad0bad0. So retry for a maximum of 2 seconds
5945 * while reading CAMRAM registers.
5947 if (time_after(jiffies, retry_max_time)) {
5948 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5949 "Failure to read CAMRAM register. "
5950 "data=0x%x.\n", *data);
5951 return QLA_FUNCTION_FAILED;
5953 msleep(100);
5954 goto retry_rd_reg;
5956 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5959 return rval;
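/*
 * Retry window of qla83xx_rd_reg above: retry_max_time = jiffies + 2 * HZ
 * caps the loop at about 2 seconds, and with msleep(100) between attempts
 * that is on the order of 20 retries before a persistent 0xbad0bad0 CAMRAM
 * read is treated as a hard failure.
 */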
5963 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5965 int rval;
5966 mbx_cmd_t mc;
5967 mbx_cmd_t *mcp = &mc;
5968 struct qla_hw_data *ha = vha->hw;
5970 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5971 return QLA_FUNCTION_FAILED;
5973 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5975 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5976 mcp->out_mb = MBX_0;
5977 mcp->in_mb = MBX_1|MBX_0;
5978 mcp->tov = MBX_TOV_SECONDS;
5979 mcp->flags = 0;
5980 rval = qla2x00_mailbox_command(vha, mcp);
5982 if (rval != QLA_SUCCESS) {
5983 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5984 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5985 rval, mcp->mb[0], mcp->mb[1]);
5986 ha->isp_ops->fw_dump(vha, 0);
5987 } else {
5988 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5991 return rval;
5995 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5996 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5998 int rval;
5999 mbx_cmd_t mc;
6000 mbx_cmd_t *mcp = &mc;
6001 uint8_t subcode = (uint8_t)options;
6002 struct qla_hw_data *ha = vha->hw;
6004 if (!IS_QLA8031(ha))
6005 return QLA_FUNCTION_FAILED;
6007 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
6009 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
6010 mcp->mb[1] = options;
6011 mcp->out_mb = MBX_1|MBX_0;
6012 if (subcode & BIT_2) {
6013 mcp->mb[2] = LSW(start_addr);
6014 mcp->mb[3] = MSW(start_addr);
6015 mcp->mb[4] = LSW(end_addr);
6016 mcp->mb[5] = MSW(end_addr);
6017 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
6019 mcp->in_mb = MBX_2|MBX_1|MBX_0;
6020 if (!(subcode & (BIT_2 | BIT_5)))
6021 mcp->in_mb |= MBX_4|MBX_3;
6022 mcp->tov = MBX_TOV_SECONDS;
6023 mcp->flags = 0;
6024 rval = qla2x00_mailbox_command(vha, mcp);
6026 if (rval != QLA_SUCCESS) {
6027 ql_dbg(ql_dbg_mbx, vha, 0x1147,
6028 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
6029 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
6030 mcp->mb[4]);
6031 ha->isp_ops->fw_dump(vha, 0);
6032 } else {
6033 if (subcode & BIT_5)
6034 *sector_size = mcp->mb[1];
6035 else if (subcode & (BIT_6 | BIT_7)) {
6036 ql_dbg(ql_dbg_mbx, vha, 0x1148,
6037 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6038 } else if (subcode & (BIT_3 | BIT_4)) {
6039 ql_dbg(ql_dbg_mbx, vha, 0x1149,
6040 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
6042 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
6045 return rval;
6049 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
6050 uint32_t size)
6052 int rval;
6053 mbx_cmd_t mc;
6054 mbx_cmd_t *mcp = &mc;
6056 if (!IS_MCTP_CAPABLE(vha->hw))
6057 return QLA_FUNCTION_FAILED;
6059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
6060 "Entered %s.\n", __func__);
6062 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
6063 mcp->mb[1] = LSW(addr);
6064 mcp->mb[2] = MSW(req_dma);
6065 mcp->mb[3] = LSW(req_dma);
6066 mcp->mb[4] = MSW(size);
6067 mcp->mb[5] = LSW(size);
6068 mcp->mb[6] = MSW(MSD(req_dma));
6069 mcp->mb[7] = LSW(MSD(req_dma));
6070 mcp->mb[8] = MSW(addr);
6071 /* Mark the RAM ID field as valid. */
6072 mcp->mb[10] |= BIT_7;
6073 /* For MCTP, the RAM ID is 0x40. */
6074 mcp->mb[10] |= 0x40;
6076 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
6077 MBX_0;
6079 mcp->in_mb = MBX_0;
6080 mcp->tov = MBX_TOV_SECONDS;
6081 mcp->flags = 0;
6082 rval = qla2x00_mailbox_command(vha, mcp);
6084 if (rval != QLA_SUCCESS) {
6085 ql_dbg(ql_dbg_mbx, vha, 0x114e,
6086 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
6087 } else {
6088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
6089 "Done %s.\n", __func__);
6092 return rval;
6096 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
6097 void *dd_buf, uint size, uint options)
6099 int rval;
6100 mbx_cmd_t mc;
6101 mbx_cmd_t *mcp = &mc;
6102 dma_addr_t dd_dma;
6104 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
6105 return QLA_FUNCTION_FAILED;
6107 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
6108 "Entered %s.\n", __func__);
6110 dd_dma = dma_map_single(&vha->hw->pdev->dev,
6111 dd_buf, size, DMA_FROM_DEVICE);
6112 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
6113 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
6114 return QLA_MEMORY_ALLOC_FAILED;
6117 memset(dd_buf, 0, size);
6119 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
6120 mcp->mb[1] = options;
6121 mcp->mb[2] = MSW(LSD(dd_dma));
6122 mcp->mb[3] = LSW(LSD(dd_dma));
6123 mcp->mb[6] = MSW(MSD(dd_dma));
6124 mcp->mb[7] = LSW(MSD(dd_dma));
6125 mcp->mb[8] = size;
6126 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
6127 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
6128 mcp->buf_size = size;
6129 mcp->flags = MBX_DMA_IN;
6130 mcp->tov = MBX_TOV_SECONDS * 4;
6131 rval = qla2x00_mailbox_command(vha, mcp);
6133 if (rval != QLA_SUCCESS) {
6134 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6135 } else {
6136 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6137 "Done %s.\n", __func__);
6140 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6141 size, DMA_FROM_DEVICE);
6143 return rval;
6146 static void qla2x00_async_mb_sp_done(void *s, int res)
6148 struct srb *sp = s;
6150 sp->u.iocb_cmd.u.mbx.rc = res;
6152 complete(&sp->u.iocb_cmd.u.mbx.comp);
6153 /* don't free sp here. Let the caller do the free */
6157 * This mailbox uses the iocb interface to send MB command.
6158 * This allows non-critial (non chip setup) command to go
6159 * out in parrallel.
6161 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6163 int rval = QLA_FUNCTION_FAILED;
6164 srb_t *sp;
6165 struct srb_iocb *c;
6167 if (!vha->hw->flags.fw_started)
6168 goto done;
6170 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6171 if (!sp)
6172 goto done;
6174 sp->type = SRB_MB_IOCB;
6175 sp->name = mb_to_str(mcp->mb[0]);
6177 c = &sp->u.iocb_cmd;
6178 c->timeout = qla2x00_async_iocb_timeout;
6179 init_completion(&c->u.mbx.comp);
6181 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6183 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6185 sp->done = qla2x00_async_mb_sp_done;
6187 rval = qla2x00_start_sp(sp);
6188 if (rval != QLA_SUCCESS) {
6189 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6190 "%s: %s Failed submission. %x.\n",
6191 __func__, sp->name, rval);
6192 goto done_free_sp;
6195 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6196 sp->name, sp->handle);
6198 wait_for_completion(&c->u.mbx.comp);
6199 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6201 rval = c->u.mbx.rc;
6202 switch (rval) {
6203 case QLA_FUNCTION_TIMEOUT:
6204 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6205 __func__, sp->name, rval);
6206 break;
6207 case QLA_SUCCESS:
6208 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6209 __func__, sp->name);
6210 sp->free(sp);
6211 break;
6212 default:
6213 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6214 __func__, sp->name, rval);
6215 sp->free(sp);
6216 break;
6219 return rval;
6221 done_free_sp:
6222 sp->free(sp);
6223 done:
6224 return rval;
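/*
 * Minimal usage sketch for the IOCB-based mailbox path above, assuming the
 * driver context; it mirrors qla24xx_res_count_wait() further below and the
 * helper name is illustrative only.
 */
static int example_get_resource_counts(struct scsi_qla_host *vha)
{
	mbx_cmd_t mc;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	/* On success the returned mailbox registers land back in mc.mb[]. */
	return qla24xx_send_mb_cmd(vha, &mc);
}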
6228 * qla24xx_gpdb_wait
6229 * NOTE: Do not call this routine from DPC thread
6231 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6233 int rval = QLA_FUNCTION_FAILED;
6234 dma_addr_t pd_dma;
6235 struct port_database_24xx *pd;
6236 struct qla_hw_data *ha = vha->hw;
6237 mbx_cmd_t mc;
6239 if (!vha->hw->flags.fw_started)
6240 goto done;
6242 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6243 if (pd == NULL) {
6244 ql_log(ql_log_warn, vha, 0xd047,
6245 "Failed to allocate port database structure.\n");
6246 goto done_free_sp;
6249 memset(&mc, 0, sizeof(mc));
6250 mc.mb[0] = MBC_GET_PORT_DATABASE;
6251 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6252 mc.mb[2] = MSW(pd_dma);
6253 mc.mb[3] = LSW(pd_dma);
6254 mc.mb[6] = MSW(MSD(pd_dma));
6255 mc.mb[7] = LSW(MSD(pd_dma));
6256 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6257 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6259 rval = qla24xx_send_mb_cmd(vha, &mc);
6260 if (rval != QLA_SUCCESS) {
6261 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6262 "%s: %8phC fail\n", __func__, fcport->port_name);
6263 goto done_free_sp;
6266 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6268 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6269 __func__, fcport->port_name);
6271 done_free_sp:
6272 if (pd)
6273 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6274 done:
6275 return rval;
6278 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6279 struct port_database_24xx *pd)
6281 int rval = QLA_SUCCESS;
6282 uint64_t zero = 0;
6283 u8 current_login_state, last_login_state;
6285 if (fcport->fc4f_nvme) {
6286 current_login_state = pd->current_login_state >> 4;
6287 last_login_state = pd->last_login_state >> 4;
6288 } else {
6289 current_login_state = pd->current_login_state & 0xf;
6290 last_login_state = pd->last_login_state & 0xf;
6293 /* Check for logged in state. */
6294 if (current_login_state != PDS_PRLI_COMPLETE) {
6295 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6296 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6297 current_login_state, last_login_state, fcport->loop_id);
6298 rval = QLA_FUNCTION_FAILED;
6299 goto gpd_error_out;
6302 if (fcport->loop_id == FC_NO_LOOP_ID ||
6303 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6304 memcmp(fcport->port_name, pd->port_name, 8))) {
6305 /* We lost the device mid way. */
6306 rval = QLA_NOT_LOGGED_IN;
6307 goto gpd_error_out;
6310 /* Names are little-endian. */
6311 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6312 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6314 /* Get port_id of device. */
6315 fcport->d_id.b.domain = pd->port_id[0];
6316 fcport->d_id.b.area = pd->port_id[1];
6317 fcport->d_id.b.al_pa = pd->port_id[2];
6318 fcport->d_id.b.rsvd_1 = 0;
6320 if (fcport->fc4f_nvme) {
6321 fcport->port_type = FCT_NVME;
6322 } else {
6323 /* If not a target, it must be an initiator or of unknown type. */
6324 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6325 fcport->port_type = FCT_INITIATOR;
6326 else
6327 fcport->port_type = FCT_TARGET;
6329 /* Passback COS information. */
6330 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6331 FC_COS_CLASS2 : FC_COS_CLASS3;
6333 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6334 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6335 fcport->conf_compl_supported = 1;
6338 gpd_error_out:
6339 return rval;
6343 * qla24xx_gidlist_wait
6344 * NOTE: don't call this routine from DPC thread.
int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_ID_LIST;
	mc.mb[2] = MSW(id_list_dma);
	mc.mb[3] = LSW(id_list_dma);
	mc.mb[6] = MSW(MSD(id_list_dma));
	mc.mb[7] = LSW(MSD(id_list_dma));
	mc.mb[8] = 0;
	mc.mb[9] = cpu_to_le16(vha->vp_idx);

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0x119b,
		    "%s: fail\n", __func__);
	} else {
		*entries = mc.mb[1];
		ql_dbg(ql_dbg_mbx, vha, 0x119c,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}
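
/*
 * Illustrative only, not part of the driver: a minimal sketch assuming the
 * caller already owns a DMA-coherent ID-list buffer (for instance the
 * gid_list/gid_list_dma pair kept in struct qla_hw_data). The helper name is
 * hypothetical; it only demonstrates the calling convention, with the entry
 * count returned through the last argument.
 */
static int example_count_loop_ids(struct scsi_qla_host *vha,
    void *id_list, dma_addr_t id_list_dma)
{
	uint16_t entries = 0;
	int rval;

	rval = qla24xx_gidlist_wait(vha, id_list, id_list_dma, &entries);
	if (rval != QLA_SUCCESS)
		return rval;

	ql_dbg(ql_dbg_mbx, vha, 0xffff,
	    "ID list returned %d entries\n", entries);
	return QLA_SUCCESS;
}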

int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(1);
	mcp->mb[2] = cpu_to_le16(value);
	mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);

	ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}

int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
{
	int rval;
	mbx_cmd_t mc;
	mbx_cmd_t *mcp = &mc;

	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
	    "Entered %s\n", __func__);

	memset(mcp->mb, 0, sizeof(mcp->mb));
	mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
	mcp->mb[1] = cpu_to_le16(0);
	mcp->out_mb = MBX_1 | MBX_0;
	mcp->in_mb = MBX_2 | MBX_0;
	mcp->tov = MBX_TOV_SECONDS;
	mcp->flags = 0;

	rval = qla2x00_mailbox_command(vha, mcp);
	if (rval == QLA_SUCCESS)
		*value = mc.mb[2];

	ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
	    (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);

	return rval;
}
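
/*
 * Illustrative only, not part of the driver: a minimal sketch combining the
 * get/set pair above, reading the current ZIO threshold and issuing the set
 * command only when the value actually changes. The helper name is
 * hypothetical.
 */
static int example_update_zio_threshold(scsi_qla_host_t *vha, uint16_t new_thresh)
{
	uint16_t cur = 0;
	int rval;

	rval = qla27xx_get_zio_threshold(vha, &cur);
	if (rval != QLA_SUCCESS)
		return rval;

	if (cur == new_thresh)
		return QLA_SUCCESS;

	return qla27xx_set_zio_threshold(vha, new_thresh);
}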

int
qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	dma_addr_t phys_addr;
	int rval, c;
	u8 *sfp_data;

	memset(ha->sfp_data, 0, SFP_DEV_SIZE);
	addr = 0xa0;
	phys_addr = ha->sfp_data_dma;
	sfp_data = ha->sfp_data;
	offset = c = 0;

	for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
		    addr, offset, SFP_BLOCK_SIZE, BIT_1);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x706d,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);

			return rval;
		}

		if (buf && (c < count)) {
			u16 sz;

			if ((count - c) >= SFP_BLOCK_SIZE)
				sz = SFP_BLOCK_SIZE;
			else
				sz = count - c;

			memcpy(buf, sfp_data, sz);
			buf += SFP_BLOCK_SIZE;
			c += sz;
		}
		phys_addr += SFP_BLOCK_SIZE;
		sfp_data += SFP_BLOCK_SIZE;
		offset += SFP_BLOCK_SIZE;
	}

	return rval;
}
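
/*
 * Illustrative only, not part of the driver: a minimal sketch, assuming
 * kzalloc()-based buffer handling, that snapshots the whole SFP image (both
 * the 0xa0 and 0xa2 device addresses) through qla2x00_read_sfp_dev(). The
 * helper name and ownership rule (caller frees with kfree()) are hypothetical.
 */
static int example_snapshot_sfp(struct scsi_qla_host *vha, char **out)
{
	char *buf;
	int rval;

	buf = kzalloc(SFP_DEV_SIZE, GFP_KERNEL);
	if (!buf)
		return QLA_MEMORY_ALLOC_FAILED;

	rval = qla2x00_read_sfp_dev(vha, buf, SFP_DEV_SIZE);
	if (rval != QLA_SUCCESS) {
		kfree(buf);
		return rval;
	}

	*out = buf;
	return QLA_SUCCESS;
}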

int qla24xx_res_count_wait(struct scsi_qla_host *vha,
    uint16_t *out_mb, int out_mb_sz)
{
	int rval = QLA_FUNCTION_FAILED;
	mbx_cmd_t mc;

	if (!vha->hw->flags.fw_started)
		goto done;

	memset(&mc, 0, sizeof(mc));
	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;

	rval = qla24xx_send_mb_cmd(vha, &mc);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: fail\n", __func__);
	} else {
		if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
			memcpy(out_mb, mc.mb, out_mb_sz);
		else
			memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);

		ql_dbg(ql_dbg_mbx, vha, 0xffff,
		    "%s: done\n", __func__);
	}
done:
	return rval;
}
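
/*
 * Illustrative only, not part of the driver: a minimal sketch of the calling
 * convention for qla24xx_res_count_wait(). The caller provides a buffer and
 * its size in bytes; the routine copies at most SIZEOF_IOCB_MB_REG bytes of
 * returned mailbox registers, whose exact layout is firmware-defined, so this
 * sketch only checks the completion status. The helper name is hypothetical.
 */
static int example_read_resource_counts(struct scsi_qla_host *vha)
{
	uint16_t mb[MAX_MAILBOX_REGISTER_COUNT] = { 0 };

	/* On success, mb[] holds the GET RESOURCE COUNTS mailbox registers. */
	return qla24xx_res_count_wait(vha, mb, sizeof(mb));
}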