drivers/scsi/qla2xxx/qla_mbx.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
22 static const char *mb_to_str(uint16_t cmd)
24 int i;
25 struct mb_cmd_name *e;
27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
28 e = mb_str + i;
29 if (cmd == e->cmd)
30 return e->str;
32 return "unknown";
35 static struct rom_cmd {
36 uint16_t cmd;
37 } rom_cmds[] = {
38 { MBC_LOAD_RAM },
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
59 static int is_rom_cmd(uint16_t cmd)
61 int i;
62 struct rom_cmd *wc;
64 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
65 wc = rom_cmds + i;
66 if (wc->cmd == cmd)
67 return 1;
70 return 0;
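/*
 * Illustrative sketch (not driver code): every mailbox wrapper in this
 * file follows the same calling convention -- load the outbound values
 * into mcp->mb[], set the out_mb/in_mb bitmasks (bit n selects mailbox
 * register n, e.g. MBX_3|MBX_0), pick a timeout, and hand the request
 * to qla2x00_mailbox_command(). Compare, e.g., qla2x00_get_firmware_state()
 * later in this file:
 *
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	command code in mb[0]
 *	mcp->out_mb = MBX_0;			registers written to firmware
 *	mcp->in_mb = MBX_1|MBX_0;		registers read back on completion
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 */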
74 * qla2x00_mailbox_command
75 * Issues a mailbox command and waits for completion.
77 * Input:
78 * ha = adapter block pointer.
79 * mcp = driver internal mbx struct pointer.
81 * Output:
82 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
84 * Returns:
85 * 0 : QLA_SUCCESS = cmd performed successfully
86 * 1 : QLA_FUNCTION_FAILED (error encountered)
87 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
89 * Context:
90 * Kernel context.
92 static int
93 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
95 int rval, i;
96 unsigned long flags = 0;
97 device_reg_t *reg;
98 uint8_t abort_active;
99 uint8_t io_lock_on;
100 uint16_t command = 0;
101 uint16_t *iptr;
102 uint16_t __iomem *optr;
103 uint32_t cnt;
104 uint32_t mboxes;
105 uint16_t __iomem *mbx_reg;
106 unsigned long wait_time;
107 struct qla_hw_data *ha = vha->hw;
108 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
111 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
113 if (ha->pdev->error_state > pci_channel_io_frozen) {
114 ql_log(ql_log_warn, vha, 0x1001,
115 "error_state is greater than pci_channel_io_frozen, "
116 "exiting.\n");
117 return QLA_FUNCTION_TIMEOUT;
120 if (vha->device_flags & DFLG_DEV_FAILED) {
121 ql_log(ql_log_warn, vha, 0x1002,
122 "Device in failed state, exiting.\n");
123 return QLA_FUNCTION_TIMEOUT;
126 /* if PCI error, then avoid mbx processing.*/
127 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
128 test_bit(UNLOADING, &base_vha->dpc_flags)) {
129 ql_log(ql_log_warn, vha, 0xd04e,
130 "PCI error, exiting.\n");
131 return QLA_FUNCTION_TIMEOUT;
134 reg = ha->iobase;
135 io_lock_on = base_vha->flags.init_done;
137 rval = QLA_SUCCESS;
138 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
141 if (ha->flags.pci_channel_io_perm_failure) {
142 ql_log(ql_log_warn, vha, 0x1003,
143 "Perm failure on EEH timeout MBX, exiting.\n");
144 return QLA_FUNCTION_TIMEOUT;
147 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
148 /* Setting Link-Down error */
149 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
150 ql_log(ql_log_warn, vha, 0x1004,
151 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
152 return QLA_FUNCTION_TIMEOUT;
155 /* check if ISP abort is active and return cmd with timeout */
156 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
157 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
158 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
159 !is_rom_cmd(mcp->mb[0])) {
160 ql_log(ql_log_info, vha, 0x1005,
161 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
162 mcp->mb[0]);
163 return QLA_FUNCTION_TIMEOUT;
167 * Wait for active mailbox commands to finish by waiting at most tov
168 * seconds. This is to serialize actual issuing of mailbox cmds during
169 * non ISP abort time.
171 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
172 /* Timeout occurred. Return error. */
173 ql_log(ql_log_warn, vha, 0xd035,
174 "Cmd access timeout, cmd=0x%x, Exiting.\n",
175 mcp->mb[0]);
176 return QLA_FUNCTION_TIMEOUT;
179 ha->flags.mbox_busy = 1;
180 /* Save mailbox command for debug */
181 ha->mcp = mcp;
183 ql_dbg(ql_dbg_mbx, vha, 0x1006,
184 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
186 spin_lock_irqsave(&ha->hardware_lock, flags);
188 /* Load mailbox registers. */
189 if (IS_P3P_TYPE(ha))
190 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
191 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
192 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
193 else
194 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
196 iptr = mcp->mb;
197 command = mcp->mb[0];
198 mboxes = mcp->out_mb;
200 ql_dbg(ql_dbg_mbx, vha, 0x1111,
201 "Mailbox registers (OUT):\n");
202 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
203 if (IS_QLA2200(ha) && cnt == 8)
204 optr =
205 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
206 if (mboxes & BIT_0) {
207 ql_dbg(ql_dbg_mbx, vha, 0x1112,
208 "mbox[%d]<-0x%04x\n", cnt, *iptr);
209 WRT_REG_WORD(optr, *iptr);
212 mboxes >>= 1;
213 optr++;
214 iptr++;
217 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
218 "I/O Address = %p.\n", optr);
220 /* Issue set host interrupt command to send cmd out. */
221 ha->flags.mbox_int = 0;
222 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
224 /* Unlock mbx registers and wait for interrupt */
225 ql_dbg(ql_dbg_mbx, vha, 0x100f,
226 "Going to unlock irq & waiting for interrupts. "
227 "jiffies=%lx.\n", jiffies);
229 /* Wait for mbx cmd completion until timeout */
231 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
232 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
234 if (IS_P3P_TYPE(ha)) {
235 if (RD_REG_DWORD(&reg->isp82.hint) &
236 HINT_MBX_INT_PENDING) {
237 spin_unlock_irqrestore(&ha->hardware_lock,
238 flags);
239 ha->flags.mbox_busy = 0;
240 ql_dbg(ql_dbg_mbx, vha, 0x1010,
241 "Pending mailbox timeout, exiting.\n");
242 rval = QLA_FUNCTION_TIMEOUT;
243 goto premature_exit;
245 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
246 } else if (IS_FWI2_CAPABLE(ha))
247 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
248 else
249 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
250 spin_unlock_irqrestore(&ha->hardware_lock, flags);
252 wait_time = jiffies;
253 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
254 mcp->tov * HZ)) {
255 ql_dbg(ql_dbg_mbx, vha, 0x117a,
256 "cmd=%x Timeout.\n", command);
257 spin_lock_irqsave(&ha->hardware_lock, flags);
258 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
259 spin_unlock_irqrestore(&ha->hardware_lock, flags);
261 if (time_after(jiffies, wait_time + 5 * HZ))
262 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
263 command, jiffies_to_msecs(jiffies - wait_time));
264 } else {
265 ql_dbg(ql_dbg_mbx, vha, 0x1011,
266 "Cmd=%x Polling Mode.\n", command);
268 if (IS_P3P_TYPE(ha)) {
269 if (RD_REG_DWORD(&reg->isp82.hint) &
270 HINT_MBX_INT_PENDING) {
271 spin_unlock_irqrestore(&ha->hardware_lock,
272 flags);
273 ha->flags.mbox_busy = 0;
274 ql_dbg(ql_dbg_mbx, vha, 0x1012,
275 "Pending mailbox timeout, exiting.\n");
276 rval = QLA_FUNCTION_TIMEOUT;
277 goto premature_exit;
279 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
280 } else if (IS_FWI2_CAPABLE(ha))
281 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
282 else
283 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
284 spin_unlock_irqrestore(&ha->hardware_lock, flags);
286 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
287 while (!ha->flags.mbox_int) {
288 if (time_after(jiffies, wait_time))
289 break;
291 /* Check for pending interrupts. */
292 qla2x00_poll(ha->rsp_q_map[0]);
294 if (!ha->flags.mbox_int &&
295 !(IS_QLA2200(ha) &&
296 command == MBC_LOAD_RISC_RAM_EXTENDED))
297 msleep(10);
298 } /* while */
299 ql_dbg(ql_dbg_mbx, vha, 0x1013,
300 "Waited %d sec.\n",
301 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
304 /* Check whether we timed out */
305 if (ha->flags.mbox_int) {
306 uint16_t *iptr2;
308 ql_dbg(ql_dbg_mbx, vha, 0x1014,
309 "Cmd=%x completed.\n", command);
311 /* Got interrupt. Clear the flag. */
312 ha->flags.mbox_int = 0;
313 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
315 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
316 ha->flags.mbox_busy = 0;
317 /* Setting Link-Down error */
318 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
319 ha->mcp = NULL;
320 rval = QLA_FUNCTION_FAILED;
321 ql_log(ql_log_warn, vha, 0xd048,
322 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
323 goto premature_exit;
326 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
327 rval = QLA_FUNCTION_FAILED;
329 /* Load return mailbox registers. */
330 iptr2 = mcp->mb;
331 iptr = (uint16_t *)&ha->mailbox_out[0];
332 mboxes = mcp->in_mb;
334 ql_dbg(ql_dbg_mbx, vha, 0x1113,
335 "Mailbox registers (IN):\n");
336 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
337 if (mboxes & BIT_0) {
338 *iptr2 = *iptr;
339 ql_dbg(ql_dbg_mbx, vha, 0x1114,
340 "mbox[%d]->0x%04x\n", cnt, *iptr2);
343 mboxes >>= 1;
344 iptr2++;
345 iptr++;
347 } else {
349 uint16_t mb[8];
350 uint32_t ictrl, host_status, hccr;
351 uint16_t w;
353 if (IS_FWI2_CAPABLE(ha)) {
354 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
355 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
356 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
357 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
358 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
359 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
360 host_status = RD_REG_DWORD(&reg->isp24.host_status);
361 hccr = RD_REG_DWORD(&reg->isp24.hccr);
363 ql_log(ql_log_warn, vha, 0xd04c,
364 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
365 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
366 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
367 mb[7], host_status, hccr);
369 } else {
370 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
371 ictrl = RD_REG_WORD(&reg->isp.ictrl);
372 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
373 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
374 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
376 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
378 /* Capture FW dump only if the PCI device is active */
379 if (!pci_channel_offline(vha->hw->pdev)) {
380 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
381 if (w == 0xffff || ictrl == 0xffffffff) {
382 /* This is a special case: if a driver unload is in
383 * progress and the PCI device goes into a bad state
384 * due to a PCI error condition, then only the PCI ERR
385 * flag would be set.
386 * We do a premature exit for the above case.
388 ha->flags.mbox_busy = 0;
389 rval = QLA_FUNCTION_TIMEOUT;
390 goto premature_exit;
393 /* Attempt to capture a firmware dump for further
394 * analysis of the current firmware state. We do not
395 * need to do this if we are intentionally generating
396 * a dump.
398 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
399 ha->isp_ops->fw_dump(vha, 0);
400 rval = QLA_FUNCTION_TIMEOUT;
404 ha->flags.mbox_busy = 0;
406 /* Clean up */
407 ha->mcp = NULL;
409 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
410 ql_dbg(ql_dbg_mbx, vha, 0x101a,
411 "Checking for additional resp interrupt.\n");
413 /* polling mode for non isp_abort commands. */
414 qla2x00_poll(ha->rsp_q_map[0]);
417 if (rval == QLA_FUNCTION_TIMEOUT &&
418 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
419 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
420 ha->flags.eeh_busy) {
421 /* not in dpc. schedule it for dpc to take over. */
422 ql_dbg(ql_dbg_mbx, vha, 0x101b,
423 "Timeout, schedule isp_abort_needed.\n");
425 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
426 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
427 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
428 if (IS_QLA82XX(ha)) {
429 ql_dbg(ql_dbg_mbx, vha, 0x112a,
430 "disabling pause transmit on port "
431 "0 & 1.\n");
432 qla82xx_wr_32(ha,
433 QLA82XX_CRB_NIU + 0x98,
434 CRB_NIU_XG_PAUSE_CTL_P0|
435 CRB_NIU_XG_PAUSE_CTL_P1);
437 ql_log(ql_log_info, base_vha, 0x101c,
438 "Mailbox cmd timeout occurred, cmd=0x%x, "
439 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
440 "abort.\n", command, mcp->mb[0],
441 ha->flags.eeh_busy);
442 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
443 qla2xxx_wake_dpc(vha);
445 } else if (!abort_active) {
446 /* call abort directly since we are in the DPC thread */
447 ql_dbg(ql_dbg_mbx, vha, 0x101d,
448 "Timeout, calling abort_isp.\n");
450 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
451 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
452 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
453 if (IS_QLA82XX(ha)) {
454 ql_dbg(ql_dbg_mbx, vha, 0x112b,
455 "disabling pause transmit on port "
456 "0 & 1.\n");
457 qla82xx_wr_32(ha,
458 QLA82XX_CRB_NIU + 0x98,
459 CRB_NIU_XG_PAUSE_CTL_P0|
460 CRB_NIU_XG_PAUSE_CTL_P1);
462 ql_log(ql_log_info, base_vha, 0x101e,
463 "Mailbox cmd timeout occurred, cmd=0x%x, "
464 "mb[0]=0x%x. Scheduling ISP abort ",
465 command, mcp->mb[0]);
466 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
467 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
468 /* Allow next mbx cmd to come in. */
469 complete(&ha->mbx_cmd_comp);
470 if (ha->isp_ops->abort_isp(vha)) {
471 /* Failed. retry later. */
472 set_bit(ISP_ABORT_NEEDED,
473 &vha->dpc_flags);
475 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
476 ql_dbg(ql_dbg_mbx, vha, 0x101f,
477 "Finished abort_isp.\n");
478 goto mbx_done;
483 premature_exit:
484 /* Allow next mbx cmd to come in. */
485 complete(&ha->mbx_cmd_comp);
487 mbx_done:
488 if (rval) {
489 ql_dbg(ql_dbg_disc, base_vha, 0x1020,
490 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
491 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
493 ql_dbg(ql_dbg_mbx, vha, 0x1198,
494 "host status: 0x%x, flags:0x%lx, intr ctrl reg:0x%x, intr status:0x%x\n",
495 RD_REG_DWORD(&reg->isp24.host_status),
496 ha->fw_dump_cap_flags,
497 RD_REG_DWORD(&reg->isp24.ictrl),
498 RD_REG_DWORD(&reg->isp24.istatus));
500 mbx_reg = &reg->isp24.mailbox0;
501 for (i = 0; i < 6; i++)
502 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1199,
503 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
504 } else {
505 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
508 return rval;
512 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
513 uint32_t risc_code_size)
515 int rval;
516 struct qla_hw_data *ha = vha->hw;
517 mbx_cmd_t mc;
518 mbx_cmd_t *mcp = &mc;
520 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
521 "Entered %s.\n", __func__);
523 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
524 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
525 mcp->mb[8] = MSW(risc_addr);
526 mcp->out_mb = MBX_8|MBX_0;
527 } else {
528 mcp->mb[0] = MBC_LOAD_RISC_RAM;
529 mcp->out_mb = MBX_0;
531 mcp->mb[1] = LSW(risc_addr);
532 mcp->mb[2] = MSW(req_dma);
533 mcp->mb[3] = LSW(req_dma);
534 mcp->mb[6] = MSW(MSD(req_dma));
535 mcp->mb[7] = LSW(MSD(req_dma));
536 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
537 if (IS_FWI2_CAPABLE(ha)) {
538 mcp->mb[4] = MSW(risc_code_size);
539 mcp->mb[5] = LSW(risc_code_size);
540 mcp->out_mb |= MBX_5|MBX_4;
541 } else {
542 mcp->mb[4] = LSW(risc_code_size);
543 mcp->out_mb |= MBX_4;
546 mcp->in_mb = MBX_0;
547 mcp->tov = MBX_TOV_SECONDS;
548 mcp->flags = 0;
549 rval = qla2x00_mailbox_command(vha, mcp);
551 if (rval != QLA_SUCCESS) {
552 ql_dbg(ql_dbg_mbx, vha, 0x1023,
553 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
554 } else {
555 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
556 "Done %s.\n", __func__);
559 return rval;
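/*
 * Sketch of the DMA address packing used above and by the other
 * DMA-capable commands in this file (assuming the usual MSW/LSW/MSD
 * helpers: upper/lower 16 bits of a 32-bit word, upper 32 bits of a
 * 64-bit value). A dma_addr_t is spread across four 16-bit mailbox
 * registers:
 *
 *	mcp->mb[2] = MSW(dma);		address bits 31..16
 *	mcp->mb[3] = LSW(dma);		address bits 15..0
 *	mcp->mb[6] = MSW(MSD(dma));	address bits 63..48
 *	mcp->mb[7] = LSW(MSD(dma));	address bits 47..32
 */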
562 #define EXTENDED_BB_CREDITS BIT_0
563 #define NVME_ENABLE_FLAG BIT_3
566 * qla2x00_execute_fw
567 * Start adapter firmware.
569 * Input:
570 * ha = adapter block pointer.
571 * TARGET_QUEUE_LOCK must be released.
572 * ADAPTER_STATE_LOCK must be released.
574 * Returns:
575 * qla2x00 local function return status code.
577 * Context:
578 * Kernel context.
581 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
583 int rval;
584 struct qla_hw_data *ha = vha->hw;
585 mbx_cmd_t mc;
586 mbx_cmd_t *mcp = &mc;
588 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
589 "Entered %s.\n", __func__);
591 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
592 mcp->out_mb = MBX_0;
593 mcp->in_mb = MBX_0;
594 if (IS_FWI2_CAPABLE(ha)) {
595 mcp->mb[1] = MSW(risc_addr);
596 mcp->mb[2] = LSW(risc_addr);
597 mcp->mb[3] = 0;
598 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
599 IS_QLA27XX(ha)) {
600 struct nvram_81xx *nv = ha->nvram;
601 mcp->mb[4] = (nv->enhanced_features &
602 EXTENDED_BB_CREDITS);
603 } else
604 mcp->mb[4] = 0;
606 if (ql2xnvmeenable && IS_QLA27XX(ha))
607 mcp->mb[4] |= NVME_ENABLE_FLAG;
609 if (ha->flags.exlogins_enabled)
610 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
612 if (ha->flags.exchoffld_enabled)
613 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
615 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
616 mcp->in_mb |= MBX_1;
617 } else {
618 mcp->mb[1] = LSW(risc_addr);
619 mcp->out_mb |= MBX_1;
620 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
621 mcp->mb[2] = 0;
622 mcp->out_mb |= MBX_2;
626 mcp->tov = MBX_TOV_SECONDS;
627 mcp->flags = 0;
628 rval = qla2x00_mailbox_command(vha, mcp);
630 if (rval != QLA_SUCCESS) {
631 ql_dbg(ql_dbg_mbx, vha, 0x1026,
632 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
633 } else {
634 if (IS_FWI2_CAPABLE(ha)) {
635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
636 "Done exchanges=%x.\n", mcp->mb[1]);
637 } else {
638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
639 "Done %s.\n", __func__);
643 return rval;
647 * qla_get_exlogin_status
648 * Get extended login status
649 * uses the memory offload control/status Mailbox
651 * Input:
652 * ha: adapter state pointer.
653 * buf_sz, ex_logins_cnt: output pointers.
655 * Returns:
656 * qla2x00 local function status
658 * Context:
659 * Kernel context.
661 #define FETCH_XLOGINS_STAT 0x8
663 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
664 uint16_t *ex_logins_cnt)
666 int rval;
667 mbx_cmd_t mc;
668 mbx_cmd_t *mcp = &mc;
670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
671 "Entered %s\n", __func__);
673 memset(mcp->mb, 0 , sizeof(mcp->mb));
674 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
675 mcp->mb[1] = FETCH_XLOGINS_STAT;
676 mcp->out_mb = MBX_1|MBX_0;
677 mcp->in_mb = MBX_10|MBX_4|MBX_0;
678 mcp->tov = MBX_TOV_SECONDS;
679 mcp->flags = 0;
681 rval = qla2x00_mailbox_command(vha, mcp);
682 if (rval != QLA_SUCCESS) {
683 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
684 } else {
685 *buf_sz = mcp->mb[4];
686 *ex_logins_cnt = mcp->mb[10];
688 ql_log(ql_log_info, vha, 0x1190,
689 "buffer size 0x%x, exchange login count=%d\n",
690 mcp->mb[4], mcp->mb[10]);
692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
693 "Done %s.\n", __func__);
696 return rval;
700 * qla_set_exlogin_mem_cfg
701 * set extended login memory configuration
702 * Mbx needs to be issued before init_cb is set
704 * Input:
705 * ha: adapter state pointer.
706 * buffer: buffer pointer
707 * phys_addr: physical address of buffer
708 * size: size of buffer
709 * TARGET_QUEUE_LOCK must be released
710 * ADAPTER_STATE_LOCK must be released
712 * Returns:
713 * qla2x00 local function status code.
715 * Context:
716 * Kernel context.
718 #define CONFIG_XLOGINS_MEM 0x3
720 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
722 int rval;
723 mbx_cmd_t mc;
724 mbx_cmd_t *mcp = &mc;
725 struct qla_hw_data *ha = vha->hw;
727 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
728 "Entered %s.\n", __func__);
730 memset(mcp->mb, 0 , sizeof(mcp->mb));
731 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
732 mcp->mb[1] = CONFIG_XLOGINS_MEM;
733 mcp->mb[2] = MSW(phys_addr);
734 mcp->mb[3] = LSW(phys_addr);
735 mcp->mb[6] = MSW(MSD(phys_addr));
736 mcp->mb[7] = LSW(MSD(phys_addr));
737 mcp->mb[8] = MSW(ha->exlogin_size);
738 mcp->mb[9] = LSW(ha->exlogin_size);
739 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
740 mcp->in_mb = MBX_11|MBX_0;
741 mcp->tov = MBX_TOV_SECONDS;
742 mcp->flags = 0;
743 rval = qla2x00_mailbox_command(vha, mcp);
744 if (rval != QLA_SUCCESS) {
745 /*EMPTY*/
746 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
747 } else {
748 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
749 "Done %s.\n", __func__);
752 return rval;
756 * qla_get_exchoffld_status
757 * Get exchange offload status
758 * uses the memory offload control/status Mailbox
760 * Input:
761 * ha: adapter state pointer.
762 * buf_sz, ex_logins_cnt: output pointers.
764 * Returns:
765 * qla2x00 local function status
767 * Context:
768 * Kernel context.
770 #define FETCH_XCHOFFLD_STAT 0x2
772 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
773 uint16_t *ex_logins_cnt)
775 int rval;
776 mbx_cmd_t mc;
777 mbx_cmd_t *mcp = &mc;
779 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
780 "Entered %s\n", __func__);
782 memset(mcp->mb, 0 , sizeof(mcp->mb));
783 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
784 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
785 mcp->out_mb = MBX_1|MBX_0;
786 mcp->in_mb = MBX_10|MBX_4|MBX_0;
787 mcp->tov = MBX_TOV_SECONDS;
788 mcp->flags = 0;
790 rval = qla2x00_mailbox_command(vha, mcp);
791 if (rval != QLA_SUCCESS) {
792 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
793 } else {
794 *buf_sz = mcp->mb[4];
795 *ex_logins_cnt = mcp->mb[10];
797 ql_log(ql_log_info, vha, 0x118e,
798 "buffer size 0x%x, exchange offload count=%d\n",
799 mcp->mb[4], mcp->mb[10]);
801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
802 "Done %s.\n", __func__);
805 return rval;
809 * qla_set_exchoffld_mem_cfg
810 * Set exchange offload memory configuration
811 * Mbx needs to be issued before init_cb is set
813 * Input:
814 * ha: adapter state pointer.
815 * buffer: buffer pointer
816 * phys_addr: physical address of buffer
817 * size: size of buffer
818 * TARGET_QUEUE_LOCK must be released
819 * ADAPTER_STATE_LOCK must be released
821 * Returns:
822 * qla2x00 local function status code.
824 * Context:
825 * Kernel context.
827 #define CONFIG_XCHOFFLD_MEM 0x3
829 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
831 int rval;
832 mbx_cmd_t mc;
833 mbx_cmd_t *mcp = &mc;
834 struct qla_hw_data *ha = vha->hw;
836 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
837 "Entered %s.\n", __func__);
839 memset(mcp->mb, 0 , sizeof(mcp->mb));
840 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
841 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
842 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
843 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
844 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
845 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
846 mcp->mb[8] = MSW(ha->exchoffld_size);
847 mcp->mb[9] = LSW(ha->exchoffld_size);
848 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
849 mcp->in_mb = MBX_11|MBX_0;
850 mcp->tov = MBX_TOV_SECONDS;
851 mcp->flags = 0;
852 rval = qla2x00_mailbox_command(vha, mcp);
853 if (rval != QLA_SUCCESS) {
854 /*EMPTY*/
855 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
856 } else {
857 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
858 "Done %s.\n", __func__);
861 return rval;
865 * qla2x00_get_fw_version
866 * Get firmware version.
868 * Input:
869 * ha: adapter state pointer.
870 * major: pointer for major number.
871 * minor: pointer for minor number.
872 * subminor: pointer for subminor number.
874 * Returns:
875 * qla2x00 local function return status code.
877 * Context:
878 * Kernel context.
881 qla2x00_get_fw_version(scsi_qla_host_t *vha)
883 int rval;
884 mbx_cmd_t mc;
885 mbx_cmd_t *mcp = &mc;
886 struct qla_hw_data *ha = vha->hw;
888 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
889 "Entered %s.\n", __func__);
891 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
892 mcp->out_mb = MBX_0;
893 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
894 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
895 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
896 if (IS_FWI2_CAPABLE(ha))
897 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
898 if (IS_QLA27XX(ha))
899 mcp->in_mb |=
900 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
901 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
903 mcp->flags = 0;
904 mcp->tov = MBX_TOV_SECONDS;
905 rval = qla2x00_mailbox_command(vha, mcp);
906 if (rval != QLA_SUCCESS)
907 goto failed;
909 /* Return mailbox data. */
910 ha->fw_major_version = mcp->mb[1];
911 ha->fw_minor_version = mcp->mb[2];
912 ha->fw_subminor_version = mcp->mb[3];
913 ha->fw_attributes = mcp->mb[6];
914 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
915 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
916 else
917 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
919 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
920 ha->mpi_version[0] = mcp->mb[10] & 0xff;
921 ha->mpi_version[1] = mcp->mb[11] >> 8;
922 ha->mpi_version[2] = mcp->mb[11] & 0xff;
923 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
924 ha->phy_version[0] = mcp->mb[8] & 0xff;
925 ha->phy_version[1] = mcp->mb[9] >> 8;
926 ha->phy_version[2] = mcp->mb[9] & 0xff;
929 if (IS_FWI2_CAPABLE(ha)) {
930 ha->fw_attributes_h = mcp->mb[15];
931 ha->fw_attributes_ext[0] = mcp->mb[16];
932 ha->fw_attributes_ext[1] = mcp->mb[17];
933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
934 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
935 __func__, mcp->mb[15], mcp->mb[6]);
936 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
937 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
938 __func__, mcp->mb[17], mcp->mb[16]);
940 if (ha->fw_attributes_h & 0x4)
941 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
942 "%s: Firmware supports Extended Login 0x%x\n",
943 __func__, ha->fw_attributes_h);
945 if (ha->fw_attributes_h & 0x8)
946 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
947 "%s: Firmware supports Exchange Offload 0x%x\n",
948 __func__, ha->fw_attributes_h);
950 /* bit 26 of fw_attributes */
951 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
952 struct init_cb_24xx *icb;
954 icb = (struct init_cb_24xx *)ha->init_cb;
956 * fw supports nvme and driver load
957 * parameter requested nvme
959 vha->flags.nvme_enabled = 1;
960 icb->firmware_options_2 &= cpu_to_le32(~0xf);
961 ha->zio_mode = 0;
962 ha->zio_timer = 0;
967 if (IS_QLA27XX(ha)) {
968 ha->mpi_version[0] = mcp->mb[10] & 0xff;
969 ha->mpi_version[1] = mcp->mb[11] >> 8;
970 ha->mpi_version[2] = mcp->mb[11] & 0xff;
971 ha->pep_version[0] = mcp->mb[13] & 0xff;
972 ha->pep_version[1] = mcp->mb[14] >> 8;
973 ha->pep_version[2] = mcp->mb[14] & 0xff;
974 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
975 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
976 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
977 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
980 failed:
981 if (rval != QLA_SUCCESS) {
982 /*EMPTY*/
983 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
984 } else {
985 /*EMPTY*/
986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
987 "Done %s.\n", __func__);
989 return rval;
993 * qla2x00_get_fw_options
994 * Get firmware options.
996 * Input:
997 * ha = adapter block pointer.
998 * fwopt = pointer for firmware options.
1000 * Returns:
1001 * qla2x00 local function return status code.
1003 * Context:
1004 * Kernel context.
1007 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1009 int rval;
1010 mbx_cmd_t mc;
1011 mbx_cmd_t *mcp = &mc;
1013 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1014 "Entered %s.\n", __func__);
1016 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1017 mcp->out_mb = MBX_0;
1018 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1019 mcp->tov = MBX_TOV_SECONDS;
1020 mcp->flags = 0;
1021 rval = qla2x00_mailbox_command(vha, mcp);
1023 if (rval != QLA_SUCCESS) {
1024 /*EMPTY*/
1025 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1026 } else {
1027 fwopts[0] = mcp->mb[0];
1028 fwopts[1] = mcp->mb[1];
1029 fwopts[2] = mcp->mb[2];
1030 fwopts[3] = mcp->mb[3];
1032 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1033 "Done %s.\n", __func__);
1036 return rval;
1041 * qla2x00_set_fw_options
1042 * Set firmware options.
1044 * Input:
1045 * ha = adapter block pointer.
1046 * fwopt = pointer for firmware options.
1048 * Returns:
1049 * qla2x00 local function return status code.
1051 * Context:
1052 * Kernel context.
1055 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1057 int rval;
1058 mbx_cmd_t mc;
1059 mbx_cmd_t *mcp = &mc;
1061 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1062 "Entered %s.\n", __func__);
1064 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1065 mcp->mb[1] = fwopts[1];
1066 mcp->mb[2] = fwopts[2];
1067 mcp->mb[3] = fwopts[3];
1068 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1069 mcp->in_mb = MBX_0;
1070 if (IS_FWI2_CAPABLE(vha->hw)) {
1071 mcp->in_mb |= MBX_1;
1072 mcp->mb[10] = fwopts[10];
1073 mcp->out_mb |= MBX_10;
1074 } else {
1075 mcp->mb[10] = fwopts[10];
1076 mcp->mb[11] = fwopts[11];
1077 mcp->mb[12] = 0; /* Undocumented, but used */
1078 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1080 mcp->tov = MBX_TOV_SECONDS;
1081 mcp->flags = 0;
1082 rval = qla2x00_mailbox_command(vha, mcp);
1084 fwopts[0] = mcp->mb[0];
1086 if (rval != QLA_SUCCESS) {
1087 /*EMPTY*/
1088 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1089 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1090 } else {
1091 /*EMPTY*/
1092 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1093 "Done %s.\n", __func__);
1096 return rval;
1100 * qla2x00_mbx_reg_test
1101 * Mailbox register wrap test.
1103 * Input:
1104 * ha = adapter block pointer.
1105 * TARGET_QUEUE_LOCK must be released.
1106 * ADAPTER_STATE_LOCK must be released.
1108 * Returns:
1109 * qla2x00 local function return status code.
1111 * Context:
1112 * Kernel context.
1115 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1117 int rval;
1118 mbx_cmd_t mc;
1119 mbx_cmd_t *mcp = &mc;
1121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1122 "Entered %s.\n", __func__);
1124 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1125 mcp->mb[1] = 0xAAAA;
1126 mcp->mb[2] = 0x5555;
1127 mcp->mb[3] = 0xAA55;
1128 mcp->mb[4] = 0x55AA;
1129 mcp->mb[5] = 0xA5A5;
1130 mcp->mb[6] = 0x5A5A;
1131 mcp->mb[7] = 0x2525;
1132 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1133 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1134 mcp->tov = MBX_TOV_SECONDS;
1135 mcp->flags = 0;
1136 rval = qla2x00_mailbox_command(vha, mcp);
1138 if (rval == QLA_SUCCESS) {
1139 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1140 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1141 rval = QLA_FUNCTION_FAILED;
1142 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1143 mcp->mb[7] != 0x2525)
1144 rval = QLA_FUNCTION_FAILED;
1147 if (rval != QLA_SUCCESS) {
1148 /*EMPTY*/
1149 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1150 } else {
1151 /*EMPTY*/
1152 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1153 "Done %s.\n", __func__);
1156 return rval;
1160 * qla2x00_verify_checksum
1161 * Verify firmware checksum.
1163 * Input:
1164 * ha = adapter block pointer.
1165 * TARGET_QUEUE_LOCK must be released.
1166 * ADAPTER_STATE_LOCK must be released.
1168 * Returns:
1169 * qla2x00 local function return status code.
1171 * Context:
1172 * Kernel context.
1175 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1177 int rval;
1178 mbx_cmd_t mc;
1179 mbx_cmd_t *mcp = &mc;
1181 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1182 "Entered %s.\n", __func__);
1184 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1185 mcp->out_mb = MBX_0;
1186 mcp->in_mb = MBX_0;
1187 if (IS_FWI2_CAPABLE(vha->hw)) {
1188 mcp->mb[1] = MSW(risc_addr);
1189 mcp->mb[2] = LSW(risc_addr);
1190 mcp->out_mb |= MBX_2|MBX_1;
1191 mcp->in_mb |= MBX_2|MBX_1;
1192 } else {
1193 mcp->mb[1] = LSW(risc_addr);
1194 mcp->out_mb |= MBX_1;
1195 mcp->in_mb |= MBX_1;
1198 mcp->tov = MBX_TOV_SECONDS;
1199 mcp->flags = 0;
1200 rval = qla2x00_mailbox_command(vha, mcp);
1202 if (rval != QLA_SUCCESS) {
1203 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1204 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1205 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1206 } else {
1207 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1208 "Done %s.\n", __func__);
1211 return rval;
1215 * qla2x00_issue_iocb
1216 * Issue IOCB using mailbox command
1218 * Input:
1219 * ha = adapter state pointer.
1220 * buffer = buffer pointer.
1221 * phys_addr = physical address of buffer.
1222 * size = size of buffer.
1223 * TARGET_QUEUE_LOCK must be released.
1224 * ADAPTER_STATE_LOCK must be released.
1226 * Returns:
1227 * qla2x00 local function return status code.
1229 * Context:
1230 * Kernel context.
1233 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1234 dma_addr_t phys_addr, size_t size, uint32_t tov)
1236 int rval;
1237 mbx_cmd_t mc;
1238 mbx_cmd_t *mcp = &mc;
1240 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1241 "Entered %s.\n", __func__);
1243 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1244 mcp->mb[1] = 0;
1245 mcp->mb[2] = MSW(phys_addr);
1246 mcp->mb[3] = LSW(phys_addr);
1247 mcp->mb[6] = MSW(MSD(phys_addr));
1248 mcp->mb[7] = LSW(MSD(phys_addr));
1249 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1250 mcp->in_mb = MBX_2|MBX_0;
1251 mcp->tov = tov;
1252 mcp->flags = 0;
1253 rval = qla2x00_mailbox_command(vha, mcp);
1255 if (rval != QLA_SUCCESS) {
1256 /*EMPTY*/
1257 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1258 } else {
1259 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1261 /* Mask reserved bits. */
1262 sts_entry->entry_status &=
1263 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1264 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1265 "Done %s.\n", __func__);
1268 return rval;
1272 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1273 size_t size)
1275 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1276 MBX_TOV_SECONDS);
1280 * qla2x00_abort_command
1281 * Abort command aborts a specified IOCB.
1283 * Input:
1284 * ha = adapter block pointer.
1285 * sp = SRB structure pointer.
1287 * Returns:
1288 * qla2x00 local function return status code.
1290 * Context:
1291 * Kernel context.
1294 qla2x00_abort_command(srb_t *sp)
1296 unsigned long flags = 0;
1297 int rval;
1298 uint32_t handle = 0;
1299 mbx_cmd_t mc;
1300 mbx_cmd_t *mcp = &mc;
1301 fc_port_t *fcport = sp->fcport;
1302 scsi_qla_host_t *vha = fcport->vha;
1303 struct qla_hw_data *ha = vha->hw;
1304 struct req_que *req;
1305 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1308 "Entered %s.\n", __func__);
1310 if (vha->flags.qpairs_available && sp->qpair)
1311 req = sp->qpair->req;
1312 else
1313 req = vha->req;
1315 spin_lock_irqsave(&ha->hardware_lock, flags);
1316 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1317 if (req->outstanding_cmds[handle] == sp)
1318 break;
1320 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1322 if (handle == req->num_outstanding_cmds) {
1323 /* command not found */
1324 return QLA_FUNCTION_FAILED;
1327 mcp->mb[0] = MBC_ABORT_COMMAND;
1328 if (HAS_EXTENDED_IDS(ha))
1329 mcp->mb[1] = fcport->loop_id;
1330 else
1331 mcp->mb[1] = fcport->loop_id << 8;
1332 mcp->mb[2] = (uint16_t)handle;
1333 mcp->mb[3] = (uint16_t)(handle >> 16);
1334 mcp->mb[6] = (uint16_t)cmd->device->lun;
1335 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1336 mcp->in_mb = MBX_0;
1337 mcp->tov = MBX_TOV_SECONDS;
1338 mcp->flags = 0;
1339 rval = qla2x00_mailbox_command(vha, mcp);
1341 if (rval != QLA_SUCCESS) {
1342 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1343 } else {
1344 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1345 "Done %s.\n", __func__);
1348 return rval;
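/*
 * Note on the abort addressing above: the command to abort is named by
 * its index ("handle") in req->outstanding_cmds[], split into 16-bit
 * halves in mb[2] (low word) and mb[3] (high word), while mb[1] carries
 * the target's loop_id -- in the upper byte on adapters without
 * extended IDs -- and mb[6] the LUN.
 */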
1352 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1354 int rval, rval2;
1355 mbx_cmd_t mc;
1356 mbx_cmd_t *mcp = &mc;
1357 scsi_qla_host_t *vha;
1358 struct req_que *req;
1359 struct rsp_que *rsp;
1361 l = l;
1362 vha = fcport->vha;
1364 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1365 "Entered %s.\n", __func__);
1367 req = vha->hw->req_q_map[0];
1368 rsp = req->rsp;
1369 mcp->mb[0] = MBC_ABORT_TARGET;
1370 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1371 if (HAS_EXTENDED_IDS(vha->hw)) {
1372 mcp->mb[1] = fcport->loop_id;
1373 mcp->mb[10] = 0;
1374 mcp->out_mb |= MBX_10;
1375 } else {
1376 mcp->mb[1] = fcport->loop_id << 8;
1378 mcp->mb[2] = vha->hw->loop_reset_delay;
1379 mcp->mb[9] = vha->vp_idx;
1381 mcp->in_mb = MBX_0;
1382 mcp->tov = MBX_TOV_SECONDS;
1383 mcp->flags = 0;
1384 rval = qla2x00_mailbox_command(vha, mcp);
1385 if (rval != QLA_SUCCESS) {
1386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1387 "Failed=%x.\n", rval);
1390 /* Issue marker IOCB. */
1391 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1392 MK_SYNC_ID);
1393 if (rval2 != QLA_SUCCESS) {
1394 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1395 "Failed to issue marker IOCB (%x).\n", rval2);
1396 } else {
1397 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1398 "Done %s.\n", __func__);
1401 return rval;
1405 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1407 int rval, rval2;
1408 mbx_cmd_t mc;
1409 mbx_cmd_t *mcp = &mc;
1410 scsi_qla_host_t *vha;
1411 struct req_que *req;
1412 struct rsp_que *rsp;
1414 vha = fcport->vha;
1416 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1417 "Entered %s.\n", __func__);
1419 req = vha->hw->req_q_map[0];
1420 rsp = req->rsp;
1421 mcp->mb[0] = MBC_LUN_RESET;
1422 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1423 if (HAS_EXTENDED_IDS(vha->hw))
1424 mcp->mb[1] = fcport->loop_id;
1425 else
1426 mcp->mb[1] = fcport->loop_id << 8;
1427 mcp->mb[2] = (u32)l;
1428 mcp->mb[3] = 0;
1429 mcp->mb[9] = vha->vp_idx;
1431 mcp->in_mb = MBX_0;
1432 mcp->tov = MBX_TOV_SECONDS;
1433 mcp->flags = 0;
1434 rval = qla2x00_mailbox_command(vha, mcp);
1435 if (rval != QLA_SUCCESS) {
1436 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1439 /* Issue marker IOCB. */
1440 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1441 MK_SYNC_ID_LUN);
1442 if (rval2 != QLA_SUCCESS) {
1443 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1444 "Failed to issue marker IOCB (%x).\n", rval2);
1445 } else {
1446 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1447 "Done %s.\n", __func__);
1450 return rval;
1454 * qla2x00_get_adapter_id
1455 * Get adapter ID and topology.
1457 * Input:
1458 * ha = adapter block pointer.
1459 * id = pointer for loop ID.
1460 * al_pa = pointer for AL_PA.
1461 * area = pointer for area.
1462 * domain = pointer for domain.
1463 * top = pointer for topology.
1464 * TARGET_QUEUE_LOCK must be released.
1465 * ADAPTER_STATE_LOCK must be released.
1467 * Returns:
1468 * qla2x00 local function return status code.
1470 * Context:
1471 * Kernel context.
1474 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1475 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1477 int rval;
1478 mbx_cmd_t mc;
1479 mbx_cmd_t *mcp = &mc;
1481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1482 "Entered %s.\n", __func__);
1484 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1485 mcp->mb[9] = vha->vp_idx;
1486 mcp->out_mb = MBX_9|MBX_0;
1487 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1488 if (IS_CNA_CAPABLE(vha->hw))
1489 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1490 if (IS_FWI2_CAPABLE(vha->hw))
1491 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1492 if (IS_QLA27XX(vha->hw))
1493 mcp->in_mb |= MBX_15;
1494 mcp->tov = MBX_TOV_SECONDS;
1495 mcp->flags = 0;
1496 rval = qla2x00_mailbox_command(vha, mcp);
1497 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1498 rval = QLA_COMMAND_ERROR;
1499 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1500 rval = QLA_INVALID_COMMAND;
1502 /* Return data. */
1503 *id = mcp->mb[1];
1504 *al_pa = LSB(mcp->mb[2]);
1505 *area = MSB(mcp->mb[2]);
1506 *domain = LSB(mcp->mb[3]);
1507 *top = mcp->mb[6];
1508 *sw_cap = mcp->mb[7];
1510 if (rval != QLA_SUCCESS) {
1511 /*EMPTY*/
1512 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1513 } else {
1514 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1515 "Done %s.\n", __func__);
1517 if (IS_CNA_CAPABLE(vha->hw)) {
1518 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1519 vha->fcoe_fcf_idx = mcp->mb[10];
1520 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1521 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1522 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1523 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1524 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1525 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1527 /* If FA-WWN supported */
1528 if (IS_FAWWN_CAPABLE(vha->hw)) {
1529 if (mcp->mb[7] & BIT_14) {
1530 vha->port_name[0] = MSB(mcp->mb[16]);
1531 vha->port_name[1] = LSB(mcp->mb[16]);
1532 vha->port_name[2] = MSB(mcp->mb[17]);
1533 vha->port_name[3] = LSB(mcp->mb[17]);
1534 vha->port_name[4] = MSB(mcp->mb[18]);
1535 vha->port_name[5] = LSB(mcp->mb[18]);
1536 vha->port_name[6] = MSB(mcp->mb[19]);
1537 vha->port_name[7] = LSB(mcp->mb[19]);
1538 fc_host_port_name(vha->host) =
1539 wwn_to_u64(vha->port_name);
1540 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1541 "FA-WWN acquired %016llx\n",
1542 wwn_to_u64(vha->port_name));
1546 if (IS_QLA27XX(vha->hw))
1547 vha->bbcr = mcp->mb[15];
1550 return rval;
1554 * qla2x00_get_retry_cnt
1555 * Get current firmware login retry count and delay.
1557 * Input:
1558 * ha = adapter block pointer.
1559 * retry_cnt = pointer to login retry count.
1560 * tov = pointer to login timeout value.
1562 * Returns:
1563 * qla2x00 local function return status code.
1565 * Context:
1566 * Kernel context.
1569 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1570 uint16_t *r_a_tov)
1572 int rval;
1573 uint16_t ratov;
1574 mbx_cmd_t mc;
1575 mbx_cmd_t *mcp = &mc;
1577 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1578 "Entered %s.\n", __func__);
1580 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1581 mcp->out_mb = MBX_0;
1582 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1583 mcp->tov = MBX_TOV_SECONDS;
1584 mcp->flags = 0;
1585 rval = qla2x00_mailbox_command(vha, mcp);
1587 if (rval != QLA_SUCCESS) {
1588 /*EMPTY*/
1589 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1590 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1591 } else {
1592 /* Convert returned data and check our values. */
1593 *r_a_tov = mcp->mb[3] / 2;
1594 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1595 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1596 /* Update to the larger values */
1597 *retry_cnt = (uint8_t)mcp->mb[1];
1598 *tov = ratov;
1601 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1602 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1605 return rval;
1609 * qla2x00_init_firmware
1610 * Initialize adapter firmware.
1612 * Input:
1613 * ha = adapter block pointer.
1614 * dptr = Initialization control block pointer.
1615 * size = size of initialization control block.
1616 * TARGET_QUEUE_LOCK must be released.
1617 * ADAPTER_STATE_LOCK must be released.
1619 * Returns:
1620 * qla2x00 local function return status code.
1622 * Context:
1623 * Kernel context.
1626 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1628 int rval;
1629 mbx_cmd_t mc;
1630 mbx_cmd_t *mcp = &mc;
1631 struct qla_hw_data *ha = vha->hw;
1633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1634 "Entered %s.\n", __func__);
1636 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1637 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1638 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1640 if (ha->flags.npiv_supported)
1641 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1642 else
1643 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1645 mcp->mb[1] = 0;
1646 mcp->mb[2] = MSW(ha->init_cb_dma);
1647 mcp->mb[3] = LSW(ha->init_cb_dma);
1648 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1649 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1650 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1651 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1652 mcp->mb[1] = BIT_0;
1653 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1654 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1655 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1656 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1657 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1658 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1660 /* 1 and 2 should normally be captured. */
1661 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1662 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1663 /* mb3 is additional info about the installed SFP. */
1664 mcp->in_mb |= MBX_3;
1665 mcp->buf_size = size;
1666 mcp->flags = MBX_DMA_OUT;
1667 mcp->tov = MBX_TOV_SECONDS;
1668 rval = qla2x00_mailbox_command(vha, mcp);
1670 if (rval != QLA_SUCCESS) {
1671 /*EMPTY*/
1672 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1673 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1674 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1675 } else {
1676 /*EMPTY*/
1677 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1678 "Done %s.\n", __func__);
1681 return rval;
1686 * qla2x00_get_port_database
1687 * Issue normal/enhanced get port database mailbox command
1688 * and copy device name as necessary.
1690 * Input:
1691 * ha = adapter state pointer.
1692 * dev = structure pointer.
1693 * opt = enhanced cmd option byte.
1695 * Returns:
1696 * qla2x00 local function return status code.
1698 * Context:
1699 * Kernel context.
1702 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1704 int rval;
1705 mbx_cmd_t mc;
1706 mbx_cmd_t *mcp = &mc;
1707 port_database_t *pd;
1708 struct port_database_24xx *pd24;
1709 dma_addr_t pd_dma;
1710 struct qla_hw_data *ha = vha->hw;
1712 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1713 "Entered %s.\n", __func__);
1715 pd24 = NULL;
1716 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1717 if (pd == NULL) {
1718 ql_log(ql_log_warn, vha, 0x1050,
1719 "Failed to allocate port database structure.\n");
1720 return QLA_MEMORY_ALLOC_FAILED;
1722 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1724 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1725 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1726 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1727 mcp->mb[2] = MSW(pd_dma);
1728 mcp->mb[3] = LSW(pd_dma);
1729 mcp->mb[6] = MSW(MSD(pd_dma));
1730 mcp->mb[7] = LSW(MSD(pd_dma));
1731 mcp->mb[9] = vha->vp_idx;
1732 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1733 mcp->in_mb = MBX_0;
1734 if (IS_FWI2_CAPABLE(ha)) {
1735 mcp->mb[1] = fcport->loop_id;
1736 mcp->mb[10] = opt;
1737 mcp->out_mb |= MBX_10|MBX_1;
1738 mcp->in_mb |= MBX_1;
1739 } else if (HAS_EXTENDED_IDS(ha)) {
1740 mcp->mb[1] = fcport->loop_id;
1741 mcp->mb[10] = opt;
1742 mcp->out_mb |= MBX_10|MBX_1;
1743 } else {
1744 mcp->mb[1] = fcport->loop_id << 8 | opt;
1745 mcp->out_mb |= MBX_1;
1747 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1748 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1749 mcp->flags = MBX_DMA_IN;
1750 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1751 rval = qla2x00_mailbox_command(vha, mcp);
1752 if (rval != QLA_SUCCESS)
1753 goto gpd_error_out;
1755 if (IS_FWI2_CAPABLE(ha)) {
1756 uint64_t zero = 0;
1757 pd24 = (struct port_database_24xx *) pd;
1759 /* Check for logged in state. */
1760 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1761 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1762 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1763 "Unable to verify login-state (%x/%x) for "
1764 "loop_id %x.\n", pd24->current_login_state,
1765 pd24->last_login_state, fcport->loop_id);
1766 rval = QLA_FUNCTION_FAILED;
1767 goto gpd_error_out;
1770 if (fcport->loop_id == FC_NO_LOOP_ID ||
1771 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1772 memcmp(fcport->port_name, pd24->port_name, 8))) {
1773 /* We lost the device mid way. */
1774 rval = QLA_NOT_LOGGED_IN;
1775 goto gpd_error_out;
1778 /* Names are little-endian. */
1779 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1780 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1782 /* Get port_id of device. */
1783 fcport->d_id.b.domain = pd24->port_id[0];
1784 fcport->d_id.b.area = pd24->port_id[1];
1785 fcport->d_id.b.al_pa = pd24->port_id[2];
1786 fcport->d_id.b.rsvd_1 = 0;
1788 /* If not target must be initiator or unknown type. */
1789 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1790 fcport->port_type = FCT_INITIATOR;
1791 else
1792 fcport->port_type = FCT_TARGET;
1794 /* Passback COS information. */
1795 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1796 FC_COS_CLASS2 : FC_COS_CLASS3;
1798 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1799 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1800 } else {
1801 uint64_t zero = 0;
1803 /* Check for logged in state. */
1804 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1805 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1806 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1807 "Unable to verify login-state (%x/%x) - "
1808 "portid=%02x%02x%02x.\n", pd->master_state,
1809 pd->slave_state, fcport->d_id.b.domain,
1810 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1811 rval = QLA_FUNCTION_FAILED;
1812 goto gpd_error_out;
1815 if (fcport->loop_id == FC_NO_LOOP_ID ||
1816 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1817 memcmp(fcport->port_name, pd->port_name, 8))) {
1818 /* We lost the device mid way. */
1819 rval = QLA_NOT_LOGGED_IN;
1820 goto gpd_error_out;
1823 /* Names are little-endian. */
1824 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1825 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1827 /* Get port_id of device. */
1828 fcport->d_id.b.domain = pd->port_id[0];
1829 fcport->d_id.b.area = pd->port_id[3];
1830 fcport->d_id.b.al_pa = pd->port_id[2];
1831 fcport->d_id.b.rsvd_1 = 0;
1833 /* If not target must be initiator or unknown type. */
1834 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1835 fcport->port_type = FCT_INITIATOR;
1836 else
1837 fcport->port_type = FCT_TARGET;
1839 /* Passback COS information. */
1840 fcport->supported_classes = (pd->options & BIT_4) ?
1841 FC_COS_CLASS2: FC_COS_CLASS3;
1844 gpd_error_out:
1845 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1847 if (rval != QLA_SUCCESS) {
1848 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1849 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1850 mcp->mb[0], mcp->mb[1]);
1851 } else {
1852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1853 "Done %s.\n", __func__);
1856 return rval;
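/*
 * The routine above shows the usual pattern for mailbox commands that
 * return data through a DMA buffer: allocate the buffer from
 * ha->s_dma_pool, pass its address in mb[2]/mb[3]/mb[6]/mb[7], set
 * mcp->buf_size and the MBX_DMA_IN flag to describe the transfer, then
 * free the buffer with dma_pool_free() once the data has been copied
 * out of it.
 */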
1860 * qla2x00_get_firmware_state
1861 * Get adapter firmware state.
1863 * Input:
1864 * ha = adapter block pointer.
1865 * dptr = pointer for firmware state.
1866 * TARGET_QUEUE_LOCK must be released.
1867 * ADAPTER_STATE_LOCK must be released.
1869 * Returns:
1870 * qla2x00 local function return status code.
1872 * Context:
1873 * Kernel context.
1876 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1878 int rval;
1879 mbx_cmd_t mc;
1880 mbx_cmd_t *mcp = &mc;
1882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1883 "Entered %s.\n", __func__);
1885 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1886 mcp->out_mb = MBX_0;
1887 if (IS_FWI2_CAPABLE(vha->hw))
1888 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1889 else
1890 mcp->in_mb = MBX_1|MBX_0;
1891 mcp->tov = MBX_TOV_SECONDS;
1892 mcp->flags = 0;
1893 rval = qla2x00_mailbox_command(vha, mcp);
1895 /* Return firmware states. */
1896 states[0] = mcp->mb[1];
1897 if (IS_FWI2_CAPABLE(vha->hw)) {
1898 states[1] = mcp->mb[2];
1899 states[2] = mcp->mb[3]; /* SFP info */
1900 states[3] = mcp->mb[4];
1901 states[4] = mcp->mb[5];
1902 states[5] = mcp->mb[6]; /* DPORT status */
1905 if (rval != QLA_SUCCESS) {
1906 /*EMPTY*/
1907 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1908 } else {
1909 /*EMPTY*/
1910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1911 "Done %s.\n", __func__);
1914 return rval;
1918 * qla2x00_get_port_name
1919 * Issue get port name mailbox command.
1920 * Returned name is in big endian format.
1922 * Input:
1923 * ha = adapter block pointer.
1924 * loop_id = loop ID of device.
1925 * name = pointer for name.
1926 * TARGET_QUEUE_LOCK must be released.
1927 * ADAPTER_STATE_LOCK must be released.
1929 * Returns:
1930 * qla2x00 local function return status code.
1932 * Context:
1933 * Kernel context.
1936 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1937 uint8_t opt)
1939 int rval;
1940 mbx_cmd_t mc;
1941 mbx_cmd_t *mcp = &mc;
1943 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1944 "Entered %s.\n", __func__);
1946 mcp->mb[0] = MBC_GET_PORT_NAME;
1947 mcp->mb[9] = vha->vp_idx;
1948 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1949 if (HAS_EXTENDED_IDS(vha->hw)) {
1950 mcp->mb[1] = loop_id;
1951 mcp->mb[10] = opt;
1952 mcp->out_mb |= MBX_10;
1953 } else {
1954 mcp->mb[1] = loop_id << 8 | opt;
1957 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1958 mcp->tov = MBX_TOV_SECONDS;
1959 mcp->flags = 0;
1960 rval = qla2x00_mailbox_command(vha, mcp);
1962 if (rval != QLA_SUCCESS) {
1963 /*EMPTY*/
1964 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1965 } else {
1966 if (name != NULL) {
1967 /* This function returns name in big endian. */
1968 name[0] = MSB(mcp->mb[2]);
1969 name[1] = LSB(mcp->mb[2]);
1970 name[2] = MSB(mcp->mb[3]);
1971 name[3] = LSB(mcp->mb[3]);
1972 name[4] = MSB(mcp->mb[6]);
1973 name[5] = LSB(mcp->mb[6]);
1974 name[6] = MSB(mcp->mb[7]);
1975 name[7] = LSB(mcp->mb[7]);
1978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1979 "Done %s.\n", __func__);
1982 return rval;
1986 * qla24xx_link_initialize
1987 * Issue link initialization mailbox command.
1989 * Input:
1990 * ha = adapter block pointer.
1991 * TARGET_QUEUE_LOCK must be released.
1992 * ADAPTER_STATE_LOCK must be released.
1994 * Returns:
1995 * qla2x00 local function return status code.
1997 * Context:
1998 * Kernel context.
2001 qla24xx_link_initialize(scsi_qla_host_t *vha)
2003 int rval;
2004 mbx_cmd_t mc;
2005 mbx_cmd_t *mcp = &mc;
2007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2008 "Entered %s.\n", __func__);
2010 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2011 return QLA_FUNCTION_FAILED;
2013 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2014 mcp->mb[1] = BIT_4;
2015 if (vha->hw->operating_mode == LOOP)
2016 mcp->mb[1] |= BIT_6;
2017 else
2018 mcp->mb[1] |= BIT_5;
2019 mcp->mb[2] = 0;
2020 mcp->mb[3] = 0;
2021 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2022 mcp->in_mb = MBX_0;
2023 mcp->tov = MBX_TOV_SECONDS;
2024 mcp->flags = 0;
2025 rval = qla2x00_mailbox_command(vha, mcp);
2027 if (rval != QLA_SUCCESS) {
2028 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2029 } else {
2030 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2031 "Done %s.\n", __func__);
2034 return rval;
2038 * qla2x00_lip_reset
2039 * Issue LIP reset mailbox command.
2041 * Input:
2042 * ha = adapter block pointer.
2043 * TARGET_QUEUE_LOCK must be released.
2044 * ADAPTER_STATE_LOCK must be released.
2046 * Returns:
2047 * qla2x00 local function return status code.
2049 * Context:
2050 * Kernel context.
2053 qla2x00_lip_reset(scsi_qla_host_t *vha)
2055 int rval;
2056 mbx_cmd_t mc;
2057 mbx_cmd_t *mcp = &mc;
2059 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2060 "Entered %s.\n", __func__);
2062 if (IS_CNA_CAPABLE(vha->hw)) {
2063 /* Logout across all FCFs. */
2064 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2065 mcp->mb[1] = BIT_1;
2066 mcp->mb[2] = 0;
2067 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2068 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2069 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2070 mcp->mb[1] = BIT_6;
2071 mcp->mb[2] = 0;
2072 mcp->mb[3] = vha->hw->loop_reset_delay;
2073 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2074 } else {
2075 mcp->mb[0] = MBC_LIP_RESET;
2076 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2077 if (HAS_EXTENDED_IDS(vha->hw)) {
2078 mcp->mb[1] = 0x00ff;
2079 mcp->mb[10] = 0;
2080 mcp->out_mb |= MBX_10;
2081 } else {
2082 mcp->mb[1] = 0xff00;
2084 mcp->mb[2] = vha->hw->loop_reset_delay;
2085 mcp->mb[3] = 0;
2087 mcp->in_mb = MBX_0;
2088 mcp->tov = MBX_TOV_SECONDS;
2089 mcp->flags = 0;
2090 rval = qla2x00_mailbox_command(vha, mcp);
2092 if (rval != QLA_SUCCESS) {
2093 /*EMPTY*/
2094 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2095 } else {
2096 /*EMPTY*/
2097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2098 "Done %s.\n", __func__);
2101 return rval;
2105 * qla2x00_send_sns
2106 * Send SNS command.
2108 * Input:
2109 * ha = adapter block pointer.
2110 * sns = pointer for command.
2111 * cmd_size = command size.
2112 * buf_size = response/command size.
2113 * TARGET_QUEUE_LOCK must be released.
2114 * ADAPTER_STATE_LOCK must be released.
2116 * Returns:
2117 * qla2x00 local function return status code.
2119 * Context:
2120 * Kernel context.
2123 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2124 uint16_t cmd_size, size_t buf_size)
2126 int rval;
2127 mbx_cmd_t mc;
2128 mbx_cmd_t *mcp = &mc;
2130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2131 "Entered %s.\n", __func__);
2133 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2134 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2135 "Retry cnt=%d ratov=%d total tov=%d.\n",
2136 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2137 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2138 mcp->mb[1] = cmd_size;
2139 mcp->mb[2] = MSW(sns_phys_address);
2140 mcp->mb[3] = LSW(sns_phys_address);
2141 mcp->mb[6] = MSW(MSD(sns_phys_address));
2142 mcp->mb[7] = LSW(MSD(sns_phys_address));
2143 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2144 mcp->in_mb = MBX_0|MBX_1;
2145 mcp->buf_size = buf_size;
2146 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2148 rval = qla2x00_mailbox_command(vha, mcp);
2150 if (rval != QLA_SUCCESS) {
2151 /*EMPTY*/
2152 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2153 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2154 rval, mcp->mb[0], mcp->mb[1]);
2155 } else {
2156 /*EMPTY*/
2157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2158 "Done %s.\n", __func__);
2161 return rval;
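/*
 * Usage sketch for qla2x00_send_sns() (illustrative only; the buffer type
 * and sizes below are assumptions, not code taken from this driver): the
 * caller owns a DMA-able SNS request/response buffer and passes only its
 * bus address here, since the firmware performs the DMA transfers itself.
 *
 *	struct sns_cmd_pkt *sns;
 *	dma_addr_t sns_dma;
 *	int rval;
 *
 *	sns = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sns), &sns_dma,
 *	    GFP_KERNEL);
 *	if (!sns)
 *		return QLA_MEMORY_ALLOC_FAILED;
 *	memset(sns, 0, sizeof(*sns));
 *	... build the SNS/CT request in the buffer ...
 *	rval = qla2x00_send_sns(vha, sns_dma, cmd_size, sizeof(*sns));
 *	if (rval == QLA_SUCCESS)
 *		... parse the response from the same buffer ...
 *	dma_free_coherent(&ha->pdev->dev, sizeof(*sns), sns, sns_dma);
 */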
2165 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2166 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2168 int rval;
2170 struct logio_entry_24xx *lg;
2171 dma_addr_t lg_dma;
2172 uint32_t iop[2];
2173 struct qla_hw_data *ha = vha->hw;
2174 struct req_que *req;
2176 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2177 "Entered %s.\n", __func__);
2179 if (vha->vp_idx && vha->qpair)
2180 req = vha->qpair->req;
2181 else
2182 req = ha->req_q_map[0];
2184 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2185 if (lg == NULL) {
2186 ql_log(ql_log_warn, vha, 0x1062,
2187 "Failed to allocate login IOCB.\n");
2188 return QLA_MEMORY_ALLOC_FAILED;
2190 memset(lg, 0, sizeof(struct logio_entry_24xx));
2192 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2193 lg->entry_count = 1;
2194 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2195 lg->nport_handle = cpu_to_le16(loop_id);
2196 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2197 if (opt & BIT_0)
2198 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2199 if (opt & BIT_1)
2200 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2201 lg->port_id[0] = al_pa;
2202 lg->port_id[1] = area;
2203 lg->port_id[2] = domain;
2204 lg->vp_index = vha->vp_idx;
2205 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2206 (ha->r_a_tov / 10 * 2) + 2);
2207 if (rval != QLA_SUCCESS) {
2208 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2209 "Failed to issue login IOCB (%x).\n", rval);
2210 } else if (lg->entry_status != 0) {
2211 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2212 "Failed to complete IOCB -- error status (%x).\n",
2213 lg->entry_status);
2214 rval = QLA_FUNCTION_FAILED;
2215 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2216 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2217 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2219 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2220 "Failed to complete IOCB -- completion status (%x) "
2221 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2222 iop[0], iop[1]);
2224 switch (iop[0]) {
2225 case LSC_SCODE_PORTID_USED:
2226 mb[0] = MBS_PORT_ID_USED;
2227 mb[1] = LSW(iop[1]);
2228 break;
2229 case LSC_SCODE_NPORT_USED:
2230 mb[0] = MBS_LOOP_ID_USED;
2231 break;
2232 case LSC_SCODE_NOLINK:
2233 case LSC_SCODE_NOIOCB:
2234 case LSC_SCODE_NOXCB:
2235 case LSC_SCODE_CMD_FAILED:
2236 case LSC_SCODE_NOFABRIC:
2237 case LSC_SCODE_FW_NOT_READY:
2238 case LSC_SCODE_NOT_LOGGED_IN:
2239 case LSC_SCODE_NOPCB:
2240 case LSC_SCODE_ELS_REJECT:
2241 case LSC_SCODE_CMD_PARAM_ERR:
2242 case LSC_SCODE_NONPORT:
2243 case LSC_SCODE_LOGGED_IN:
2244 case LSC_SCODE_NOFLOGI_ACC:
2245 default:
2246 mb[0] = MBS_COMMAND_ERROR;
2247 break;
2249 } else {
2250 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2251 "Done %s.\n", __func__);
2253 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2255 mb[0] = MBS_COMMAND_COMPLETE;
2256 mb[1] = 0;
2257 if (iop[0] & BIT_4) {
2258 if (iop[0] & BIT_8)
2259 mb[1] |= BIT_1;
2260 } else
2261 mb[1] = BIT_0;
2263 /* Passback COS information. */
2264 mb[10] = 0;
2265 if (lg->io_parameter[7] || lg->io_parameter[8])
2266 mb[10] |= BIT_0; /* Class 2. */
2267 if (lg->io_parameter[9] || lg->io_parameter[10])
2268 mb[10] |= BIT_1; /* Class 3. */
2269 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2270 mb[10] |= BIT_7; /* Confirmed Completion Allowed */
2275 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2277 return rval;
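/*
 * Sketch of interpreting the mb[] array filled in by qla24xx_login_fabric()
 * (illustrative, not driver code; error handling trimmed). The opt flags
 * BIT_0/BIT_1 select conditional PLOGI / skip-PRLI as coded above.
 *
 *	uint16_t mb[MAILBOX_REGISTER_COUNT];
 *
 *	rval = qla24xx_login_fabric(vha, fcport->loop_id,
 *	    fcport->d_id.b.domain, fcport->d_id.b.area,
 *	    fcport->d_id.b.al_pa, mb, 0);
 *	if (rval == QLA_SUCCESS) {
 *		switch (mb[0]) {
 *		case MBS_COMMAND_COMPLETE:  login succeeded; mb[10] carries
 *					    the class-2/class-3 COS bits.
 *		case MBS_PORT_ID_USED:      mb[1] holds the loop ID that
 *					    already owns this port ID.
 *		case MBS_LOOP_ID_USED:      retry with another loop ID.
 *		default:                    treat as MBS_COMMAND_ERROR.
 *		}
 *	}
 */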
2281 * qla2x00_login_fabric
2282 * Issue login fabric port mailbox command.
2284 * Input:
2285 * ha = adapter block pointer.
2286 * loop_id = device loop ID.
2287 * domain = device domain.
2288 * area = device area.
2289 * al_pa = device AL_PA.
2290 * status = pointer for return status.
2291 * opt = command options.
2292 * TARGET_QUEUE_LOCK must be released.
2293 * ADAPTER_STATE_LOCK must be released.
2295 * Returns:
2296 * qla2x00 local function return status code.
2298 * Context:
2299 * Kernel context.
2302 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2303 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2305 int rval;
2306 mbx_cmd_t mc;
2307 mbx_cmd_t *mcp = &mc;
2308 struct qla_hw_data *ha = vha->hw;
2310 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2311 "Entered %s.\n", __func__);
2313 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2314 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2315 if (HAS_EXTENDED_IDS(ha)) {
2316 mcp->mb[1] = loop_id;
2317 mcp->mb[10] = opt;
2318 mcp->out_mb |= MBX_10;
2319 } else {
2320 mcp->mb[1] = (loop_id << 8) | opt;
2322 mcp->mb[2] = domain;
2323 mcp->mb[3] = area << 8 | al_pa;
2325 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2326 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2327 mcp->flags = 0;
2328 rval = qla2x00_mailbox_command(vha, mcp);
2330 /* Return mailbox statuses. */
2331 if (mb != NULL) {
2332 mb[0] = mcp->mb[0];
2333 mb[1] = mcp->mb[1];
2334 mb[2] = mcp->mb[2];
2335 mb[6] = mcp->mb[6];
2336 mb[7] = mcp->mb[7];
2337 /* COS retrieved from Get-Port-Database mailbox command. */
2338 mb[10] = 0;
2341 if (rval != QLA_SUCCESS) {
2342 /* RLU tmp code: the main mailbox_command function should be changed
2343 * to return ok even when the mailbox completion value is not
2344 * SUCCESS. Until then, the caller is responsible for interpreting
2345 * the return values of this mailbox command, so that we do not
2346 * have to change too much of the existing code.
2347 */
2348 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2349 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2350 mcp->mb[0] == 0x4006)
2351 rval = QLA_SUCCESS;
2353 /*EMPTY*/
2354 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2355 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2356 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2357 } else {
2358 /*EMPTY*/
2359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2360 "Done %s.\n", __func__);
2363 return rval;
2367 * qla2x00_login_local_device
2368 * Issue login loop port mailbox command.
2370 * Input:
2371 * ha = adapter block pointer.
2372 * loop_id = device loop ID.
2373 * opt = command options.
2375 * Returns:
2376 * Return status code.
2378 * Context:
2379 * Kernel context.
2383 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2384 uint16_t *mb_ret, uint8_t opt)
2386 int rval;
2387 mbx_cmd_t mc;
2388 mbx_cmd_t *mcp = &mc;
2389 struct qla_hw_data *ha = vha->hw;
2391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2392 "Entered %s.\n", __func__);
2394 if (IS_FWI2_CAPABLE(ha))
2395 return qla24xx_login_fabric(vha, fcport->loop_id,
2396 fcport->d_id.b.domain, fcport->d_id.b.area,
2397 fcport->d_id.b.al_pa, mb_ret, opt);
2399 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2400 if (HAS_EXTENDED_IDS(ha))
2401 mcp->mb[1] = fcport->loop_id;
2402 else
2403 mcp->mb[1] = fcport->loop_id << 8;
2404 mcp->mb[2] = opt;
2405 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2406 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2407 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2408 mcp->flags = 0;
2409 rval = qla2x00_mailbox_command(vha, mcp);
2411 /* Return mailbox statuses. */
2412 if (mb_ret != NULL) {
2413 mb_ret[0] = mcp->mb[0];
2414 mb_ret[1] = mcp->mb[1];
2415 mb_ret[6] = mcp->mb[6];
2416 mb_ret[7] = mcp->mb[7];
2419 if (rval != QLA_SUCCESS) {
2420 /* AV tmp code: the main mailbox_command function should be changed
2421 * to return ok even when the mailbox completion value is not
2422 * SUCCESS. Until then, the caller is responsible for interpreting
2423 * the return values of this mailbox command, so that we do not
2424 * have to change too much of the existing code.
2425 */
2426 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2427 rval = QLA_SUCCESS;
2429 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2430 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2431 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2432 } else {
2433 /*EMPTY*/
2434 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2435 "Done %s.\n", __func__);
2438 return (rval);
2442 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2443 uint8_t area, uint8_t al_pa)
2445 int rval;
2446 struct logio_entry_24xx *lg;
2447 dma_addr_t lg_dma;
2448 struct qla_hw_data *ha = vha->hw;
2449 struct req_que *req;
2451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2452 "Entered %s.\n", __func__);
2454 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2455 if (lg == NULL) {
2456 ql_log(ql_log_warn, vha, 0x106e,
2457 "Failed to allocate logout IOCB.\n");
2458 return QLA_MEMORY_ALLOC_FAILED;
2460 memset(lg, 0, sizeof(struct logio_entry_24xx));
2462 req = vha->req;
2463 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2464 lg->entry_count = 1;
2465 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2466 lg->nport_handle = cpu_to_le16(loop_id);
2467 lg->control_flags =
2468 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2469 LCF_FREE_NPORT);
2470 lg->port_id[0] = al_pa;
2471 lg->port_id[1] = area;
2472 lg->port_id[2] = domain;
2473 lg->vp_index = vha->vp_idx;
2474 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2475 (ha->r_a_tov / 10 * 2) + 2);
2476 if (rval != QLA_SUCCESS) {
2477 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2478 "Failed to issue logout IOCB (%x).\n", rval);
2479 } else if (lg->entry_status != 0) {
2480 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2481 "Failed to complete IOCB -- error status (%x).\n",
2482 lg->entry_status);
2483 rval = QLA_FUNCTION_FAILED;
2484 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2485 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2486 "Failed to complete IOCB -- completion status (%x) "
2487 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2488 le32_to_cpu(lg->io_parameter[0]),
2489 le32_to_cpu(lg->io_parameter[1]));
2490 } else {
2491 /*EMPTY*/
2492 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2493 "Done %s.\n", __func__);
2496 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2498 return rval;
2502 * qla2x00_fabric_logout
2503 * Issue logout fabric port mailbox command.
2505 * Input:
2506 * ha = adapter block pointer.
2507 * loop_id = device loop ID.
2508 * TARGET_QUEUE_LOCK must be released.
2509 * ADAPTER_STATE_LOCK must be released.
2511 * Returns:
2512 * qla2x00 local function return status code.
2514 * Context:
2515 * Kernel context.
2518 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2519 uint8_t area, uint8_t al_pa)
2521 int rval;
2522 mbx_cmd_t mc;
2523 mbx_cmd_t *mcp = &mc;
2525 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2526 "Entered %s.\n", __func__);
2528 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2529 mcp->out_mb = MBX_1|MBX_0;
2530 if (HAS_EXTENDED_IDS(vha->hw)) {
2531 mcp->mb[1] = loop_id;
2532 mcp->mb[10] = 0;
2533 mcp->out_mb |= MBX_10;
2534 } else {
2535 mcp->mb[1] = loop_id << 8;
2538 mcp->in_mb = MBX_1|MBX_0;
2539 mcp->tov = MBX_TOV_SECONDS;
2540 mcp->flags = 0;
2541 rval = qla2x00_mailbox_command(vha, mcp);
2543 if (rval != QLA_SUCCESS) {
2544 /*EMPTY*/
2545 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2546 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2547 } else {
2548 /*EMPTY*/
2549 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2550 "Done %s.\n", __func__);
2553 return rval;
2557 * qla2x00_full_login_lip
2558 * Issue full login LIP mailbox command.
2560 * Input:
2561 * ha = adapter block pointer.
2562 * TARGET_QUEUE_LOCK must be released.
2563 * ADAPTER_STATE_LOCK must be released.
2565 * Returns:
2566 * qla2x00 local function return status code.
2568 * Context:
2569 * Kernel context.
2572 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2574 int rval;
2575 mbx_cmd_t mc;
2576 mbx_cmd_t *mcp = &mc;
2578 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2579 "Entered %s.\n", __func__);
2581 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2582 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2583 mcp->mb[2] = 0;
2584 mcp->mb[3] = 0;
2585 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2586 mcp->in_mb = MBX_0;
2587 mcp->tov = MBX_TOV_SECONDS;
2588 mcp->flags = 0;
2589 rval = qla2x00_mailbox_command(vha, mcp);
2591 if (rval != QLA_SUCCESS) {
2592 /*EMPTY*/
2593 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2594 } else {
2595 /*EMPTY*/
2596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2597 "Done %s.\n", __func__);
2600 return rval;
2604 * qla2x00_get_id_list
2605 * Get the firmware's list of logged-in loop IDs / port IDs.
2606 * Input:
2607 * ha = adapter block pointer.
2609 * Returns:
2610 * qla2x00 local function return status code.
2612 * Context:
2613 * Kernel context.
2616 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2617 uint16_t *entries)
2619 int rval;
2620 mbx_cmd_t mc;
2621 mbx_cmd_t *mcp = &mc;
2623 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2624 "Entered %s.\n", __func__);
2626 if (id_list == NULL)
2627 return QLA_FUNCTION_FAILED;
2629 mcp->mb[0] = MBC_GET_ID_LIST;
2630 mcp->out_mb = MBX_0;
2631 if (IS_FWI2_CAPABLE(vha->hw)) {
2632 mcp->mb[2] = MSW(id_list_dma);
2633 mcp->mb[3] = LSW(id_list_dma);
2634 mcp->mb[6] = MSW(MSD(id_list_dma));
2635 mcp->mb[7] = LSW(MSD(id_list_dma));
2636 mcp->mb[8] = 0;
2637 mcp->mb[9] = vha->vp_idx;
2638 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2639 } else {
2640 mcp->mb[1] = MSW(id_list_dma);
2641 mcp->mb[2] = LSW(id_list_dma);
2642 mcp->mb[3] = MSW(MSD(id_list_dma));
2643 mcp->mb[6] = LSW(MSD(id_list_dma));
2644 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2646 mcp->in_mb = MBX_1|MBX_0;
2647 mcp->tov = MBX_TOV_SECONDS;
2648 mcp->flags = 0;
2649 rval = qla2x00_mailbox_command(vha, mcp);
2651 if (rval != QLA_SUCCESS) {
2652 /*EMPTY*/
2653 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2654 } else {
2655 *entries = mcp->mb[1];
2656 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2657 "Done %s.\n", __func__);
2660 return rval;
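/*
 * Usage sketch for qla2x00_get_id_list() (illustrative; assumes the
 * gid_list buffer the driver pre-allocates at init time and the
 * gid_list_info record layout from the driver headers):
 *
 *	uint16_t entries = 0;
 *
 *	rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
 *	    &entries);
 *	if (rval == QLA_SUCCESS)
 *		... walk 'entries' struct gid_list_info records in
 *		    ha->gid_list to learn the loop ID / port ID pairs
 *		    currently known to the firmware ...
 */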
2664 * qla2x00_get_resource_cnts
2665 * Get current firmware resource counts.
2667 * Input:
2668 * ha = adapter block pointer.
2670 * Returns:
2671 * qla2x00 local function return status code.
2673 * Context:
2674 * Kernel context.
2677 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2679 struct qla_hw_data *ha = vha->hw;
2680 int rval;
2681 mbx_cmd_t mc;
2682 mbx_cmd_t *mcp = &mc;
2684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2685 "Entered %s.\n", __func__);
2687 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2688 mcp->out_mb = MBX_0;
2689 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2690 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2691 mcp->in_mb |= MBX_12;
2692 mcp->tov = MBX_TOV_SECONDS;
2693 mcp->flags = 0;
2694 rval = qla2x00_mailbox_command(vha, mcp);
2696 if (rval != QLA_SUCCESS) {
2697 /*EMPTY*/
2698 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2699 "Failed mb[0]=%x.\n", mcp->mb[0]);
2700 } else {
2701 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2702 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2703 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2704 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2705 mcp->mb[11], mcp->mb[12]);
2707 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2708 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2709 ha->cur_fw_xcb_count = mcp->mb[3];
2710 ha->orig_fw_xcb_count = mcp->mb[6];
2711 ha->cur_fw_iocb_count = mcp->mb[7];
2712 ha->orig_fw_iocb_count = mcp->mb[10];
2713 if (ha->flags.npiv_supported)
2714 ha->max_npiv_vports = mcp->mb[11];
2715 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2716 ha->fw_max_fcf_count = mcp->mb[12];
2719 return (rval);
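/*
 * Sketch of a caller of qla2x00_get_resource_cnts(): the command takes no
 * inputs and deposits its results directly into the qla_hw_data fields
 * assigned above, so callers typically just check the return value and
 * then read the cached counters, e.g.:
 *
 *	if (qla2x00_get_resource_cnts(vha) == QLA_SUCCESS)
 *		ql_log(ql_log_info, vha, 0x0000,
 *		    "IOCBs %d/%d, exchanges %d/%d.\n",
 *		    ha->cur_fw_iocb_count, ha->orig_fw_iocb_count,
 *		    ha->cur_fw_xcb_count, ha->orig_fw_xcb_count);
 *
 * (The log id 0x0000 above is a placeholder.)
 */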
2723 * qla2x00_get_fcal_position_map
2724 * Get FCAL (LILP) position map using mailbox command
2726 * Input:
2727 * ha = adapter state pointer.
2728 * pos_map = buffer pointer (can be NULL).
2730 * Returns:
2731 * qla2x00 local function return status code.
2733 * Context:
2734 * Kernel context.
2737 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2739 int rval;
2740 mbx_cmd_t mc;
2741 mbx_cmd_t *mcp = &mc;
2742 char *pmap;
2743 dma_addr_t pmap_dma;
2744 struct qla_hw_data *ha = vha->hw;
2746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2747 "Entered %s.\n", __func__);
2749 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2750 if (pmap == NULL) {
2751 ql_log(ql_log_warn, vha, 0x1080,
2752 "Memory alloc failed.\n");
2753 return QLA_MEMORY_ALLOC_FAILED;
2755 memset(pmap, 0, FCAL_MAP_SIZE);
2757 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2758 mcp->mb[2] = MSW(pmap_dma);
2759 mcp->mb[3] = LSW(pmap_dma);
2760 mcp->mb[6] = MSW(MSD(pmap_dma));
2761 mcp->mb[7] = LSW(MSD(pmap_dma));
2762 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2763 mcp->in_mb = MBX_1|MBX_0;
2764 mcp->buf_size = FCAL_MAP_SIZE;
2765 mcp->flags = MBX_DMA_IN;
2766 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2767 rval = qla2x00_mailbox_command(vha, mcp);
2769 if (rval == QLA_SUCCESS) {
2770 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2771 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2772 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2773 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2774 pmap, pmap[0] + 1);
2776 if (pos_map)
2777 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2779 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2781 if (rval != QLA_SUCCESS) {
2782 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2783 } else {
2784 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2785 "Done %s.\n", __func__);
2788 return rval;
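/*
 * Usage sketch for qla2x00_get_fcal_position_map(): pos_map must be at
 * least FCAL_MAP_SIZE bytes; byte 0 is the number of valid entries that
 * follow (as logged above), and the remaining bytes are the loop position
 * map returned by the firmware. For example:
 *
 *	char map[FCAL_MAP_SIZE];
 *	int i;
 *
 *	if (qla2x00_get_fcal_position_map(vha, map) == QLA_SUCCESS)
 *		for (i = 1; i <= map[0]; i++)
 *			... map[i] is the AL_PA of the i-th loop device ...
 */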
2792 * qla2x00_get_link_status
2793 * Get link status counters for the specified port.
2794 * Input:
2795 * ha = adapter block pointer.
2796 * loop_id = device loop ID.
2797 * stats = pointer to link statistics return buffer.
2798 * stats_dma = DMA address of the statistics buffer.
2799 * Returns:
2800 * 0 = success.
2801 * BIT_0 = mem alloc error.
2802 * BIT_1 = mailbox error.
2805 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2806 struct link_statistics *stats, dma_addr_t stats_dma)
2808 int rval;
2809 mbx_cmd_t mc;
2810 mbx_cmd_t *mcp = &mc;
2811 uint32_t *iter = (void *)stats;
2812 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2813 struct qla_hw_data *ha = vha->hw;
2815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2816 "Entered %s.\n", __func__);
2818 mcp->mb[0] = MBC_GET_LINK_STATUS;
2819 mcp->mb[2] = MSW(LSD(stats_dma));
2820 mcp->mb[3] = LSW(LSD(stats_dma));
2821 mcp->mb[6] = MSW(MSD(stats_dma));
2822 mcp->mb[7] = LSW(MSD(stats_dma));
2823 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2824 mcp->in_mb = MBX_0;
2825 if (IS_FWI2_CAPABLE(ha)) {
2826 mcp->mb[1] = loop_id;
2827 mcp->mb[4] = 0;
2828 mcp->mb[10] = 0;
2829 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2830 mcp->in_mb |= MBX_1;
2831 } else if (HAS_EXTENDED_IDS(ha)) {
2832 mcp->mb[1] = loop_id;
2833 mcp->mb[10] = 0;
2834 mcp->out_mb |= MBX_10|MBX_1;
2835 } else {
2836 mcp->mb[1] = loop_id << 8;
2837 mcp->out_mb |= MBX_1;
2839 mcp->tov = MBX_TOV_SECONDS;
2840 mcp->flags = IOCTL_CMD;
2841 rval = qla2x00_mailbox_command(vha, mcp);
2843 if (rval == QLA_SUCCESS) {
2844 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2845 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2846 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2847 rval = QLA_FUNCTION_FAILED;
2848 } else {
2849 /* Re-endianize - firmware data is le32. */
2850 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2851 "Done %s.\n", __func__);
2852 for ( ; dwords--; iter++)
2853 le32_to_cpus(iter);
2855 } else {
2856 /* Failed. */
2857 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2860 return rval;
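/*
 * Usage sketch for qla2x00_get_link_status() (illustrative; error handling
 * trimmed): the statistics buffer must be DMA-able because the firmware
 * writes it directly, and only the fields preceding link_up_cnt are
 * byte-swapped by this routine.
 *
 *	struct link_statistics *stats;
 *	dma_addr_t stats_dma;
 *
 *	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats),
 *	    &stats_dma, GFP_KERNEL);
 *	if (!stats)
 *		return;
 *	if (qla2x00_get_link_status(vha, vha->loop_id, stats,
 *	    stats_dma) == QLA_SUCCESS)
 *		... stats->link_fail_cnt etc. are now in CPU byte order ...
 *	dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma);
 */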
2864 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2865 dma_addr_t stats_dma, uint16_t options)
2867 int rval;
2868 mbx_cmd_t mc;
2869 mbx_cmd_t *mcp = &mc;
2870 uint32_t *iter, dwords;
2872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2873 "Entered %s.\n", __func__);
2875 memset(&mc, 0, sizeof(mc));
2876 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2877 mc.mb[2] = MSW(stats_dma);
2878 mc.mb[3] = LSW(stats_dma);
2879 mc.mb[6] = MSW(MSD(stats_dma));
2880 mc.mb[7] = LSW(MSD(stats_dma));
2881 mc.mb[8] = sizeof(struct link_statistics) / 4;
2882 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2883 mc.mb[10] = cpu_to_le16(options);
2885 rval = qla24xx_send_mb_cmd(vha, &mc);
2887 if (rval == QLA_SUCCESS) {
2888 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2889 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2890 "Failed mb[0]=%x.\n", mcp->mb[0]);
2891 rval = QLA_FUNCTION_FAILED;
2892 } else {
2893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2894 "Done %s.\n", __func__);
2895 /* Re-endianize - firmware data is le32. */
2896 dwords = sizeof(struct link_statistics) / 4;
2897 iter = &stats->link_fail_cnt;
2898 for ( ; dwords--; iter++)
2899 le32_to_cpus(iter);
2901 } else {
2902 /* Failed. */
2903 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2906 return rval;
2910 qla24xx_abort_command(srb_t *sp)
2912 int rval;
2913 unsigned long flags = 0;
2915 struct abort_entry_24xx *abt;
2916 dma_addr_t abt_dma;
2917 uint32_t handle;
2918 fc_port_t *fcport = sp->fcport;
2919 struct scsi_qla_host *vha = fcport->vha;
2920 struct qla_hw_data *ha = vha->hw;
2921 struct req_que *req = vha->req;
2923 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2924 "Entered %s.\n", __func__);
2926 if (vha->flags.qpairs_available && sp->qpair)
2927 req = sp->qpair->req;
2929 if (ql2xasynctmfenable)
2930 return qla24xx_async_abort_command(sp);
2932 spin_lock_irqsave(&ha->hardware_lock, flags);
2933 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
2934 if (req->outstanding_cmds[handle] == sp)
2935 break;
2937 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2938 if (handle == req->num_outstanding_cmds) {
2939 /* Command not found. */
2940 return QLA_FUNCTION_FAILED;
2943 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2944 if (abt == NULL) {
2945 ql_log(ql_log_warn, vha, 0x108d,
2946 "Failed to allocate abort IOCB.\n");
2947 return QLA_MEMORY_ALLOC_FAILED;
2949 memset(abt, 0, sizeof(struct abort_entry_24xx));
2951 abt->entry_type = ABORT_IOCB_TYPE;
2952 abt->entry_count = 1;
2953 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2954 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2955 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2956 abt->port_id[0] = fcport->d_id.b.al_pa;
2957 abt->port_id[1] = fcport->d_id.b.area;
2958 abt->port_id[2] = fcport->d_id.b.domain;
2959 abt->vp_index = fcport->vha->vp_idx;
2961 abt->req_que_no = cpu_to_le16(req->id);
2963 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2964 if (rval != QLA_SUCCESS) {
2965 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2966 "Failed to issue IOCB (%x).\n", rval);
2967 } else if (abt->entry_status != 0) {
2968 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2969 "Failed to complete IOCB -- error status (%x).\n",
2970 abt->entry_status);
2971 rval = QLA_FUNCTION_FAILED;
2972 } else if (abt->nport_handle != cpu_to_le16(0)) {
2973 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2974 "Failed to complete IOCB -- completion status (%x).\n",
2975 le16_to_cpu(abt->nport_handle));
2976 if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR))
2977 rval = QLA_FUNCTION_PARAMETER_ERROR;
2978 else
2979 rval = QLA_FUNCTION_FAILED;
2980 } else {
2981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2982 "Done %s.\n", __func__);
2985 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
2987 return rval;
2990 struct tsk_mgmt_cmd {
2991 union {
2992 struct tsk_mgmt_entry tsk;
2993 struct sts_entry_24xx sts;
2994 } p;
2997 static int
2998 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2999 uint64_t l, int tag)
3001 int rval, rval2;
3002 struct tsk_mgmt_cmd *tsk;
3003 struct sts_entry_24xx *sts;
3004 dma_addr_t tsk_dma;
3005 scsi_qla_host_t *vha;
3006 struct qla_hw_data *ha;
3007 struct req_que *req;
3008 struct rsp_que *rsp;
3009 struct qla_qpair *qpair;
3011 vha = fcport->vha;
3012 ha = vha->hw;
3013 req = vha->req;
3015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3016 "Entered %s.\n", __func__);
3018 if (vha->vp_idx && vha->qpair) {
3019 /* NPIV port */
3020 qpair = vha->qpair;
3021 rsp = qpair->rsp;
3022 req = qpair->req;
3023 } else {
3024 rsp = req->rsp;
3027 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3028 if (tsk == NULL) {
3029 ql_log(ql_log_warn, vha, 0x1093,
3030 "Failed to allocate task management IOCB.\n");
3031 return QLA_MEMORY_ALLOC_FAILED;
3033 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
3035 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3036 tsk->p.tsk.entry_count = 1;
3037 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3038 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3039 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3040 tsk->p.tsk.control_flags = cpu_to_le32(type);
3041 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3042 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3043 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3044 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3045 if (type == TCF_LUN_RESET) {
3046 int_to_scsilun(l, &tsk->p.tsk.lun);
3047 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3048 sizeof(tsk->p.tsk.lun));
3051 sts = &tsk->p.sts;
3052 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3053 if (rval != QLA_SUCCESS) {
3054 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3055 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3056 } else if (sts->entry_status != 0) {
3057 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3058 "Failed to complete IOCB -- error status (%x).\n",
3059 sts->entry_status);
3060 rval = QLA_FUNCTION_FAILED;
3061 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3062 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3063 "Failed to complete IOCB -- completion status (%x).\n",
3064 le16_to_cpu(sts->comp_status));
3065 rval = QLA_FUNCTION_FAILED;
3066 } else if (le16_to_cpu(sts->scsi_status) &
3067 SS_RESPONSE_INFO_LEN_VALID) {
3068 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3070 "Ignoring inconsistent data length -- not enough "
3071 "response info (%d).\n",
3072 le32_to_cpu(sts->rsp_data_len));
3073 } else if (sts->data[3]) {
3074 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3075 "Failed to complete IOCB -- response (%x).\n",
3076 sts->data[3]);
3077 rval = QLA_FUNCTION_FAILED;
3081 /* Issue marker IOCB. */
3082 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3083 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3084 if (rval2 != QLA_SUCCESS) {
3085 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3086 "Failed to issue marker IOCB (%x).\n", rval2);
3087 } else {
3088 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3089 "Done %s.\n", __func__);
3092 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3094 return rval;
3098 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3100 struct qla_hw_data *ha = fcport->vha->hw;
3102 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3103 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3105 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3109 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3111 struct qla_hw_data *ha = fcport->vha->hw;
3113 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3114 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3116 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
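/*
 * These two wrappers are what a SCSI target/LUN reset request ends up
 * calling. A minimal illustrative LUN-reset call (the lun and tag values
 * below are placeholders):
 *
 *	rval = qla24xx_lun_reset(fcport, lun, 1);
 *	if (rval != QLA_SUCCESS)
 *		... escalate, e.g. to a target reset or adapter reset ...
 */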
3120 qla2x00_system_error(scsi_qla_host_t *vha)
3122 int rval;
3123 mbx_cmd_t mc;
3124 mbx_cmd_t *mcp = &mc;
3125 struct qla_hw_data *ha = vha->hw;
3127 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3128 return QLA_FUNCTION_FAILED;
3130 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3131 "Entered %s.\n", __func__);
3133 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3134 mcp->out_mb = MBX_0;
3135 mcp->in_mb = MBX_0;
3136 mcp->tov = 5;
3137 mcp->flags = 0;
3138 rval = qla2x00_mailbox_command(vha, mcp);
3140 if (rval != QLA_SUCCESS) {
3141 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3142 } else {
3143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3144 "Done %s.\n", __func__);
3147 return rval;
3151 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3153 int rval;
3154 mbx_cmd_t mc;
3155 mbx_cmd_t *mcp = &mc;
3157 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3158 !IS_QLA27XX(vha->hw))
3159 return QLA_FUNCTION_FAILED;
3161 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3162 "Entered %s.\n", __func__);
3164 mcp->mb[0] = MBC_WRITE_SERDES;
3165 mcp->mb[1] = addr;
3166 if (IS_QLA2031(vha->hw))
3167 mcp->mb[2] = data & 0xff;
3168 else
3169 mcp->mb[2] = data;
3171 mcp->mb[3] = 0;
3172 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3173 mcp->in_mb = MBX_0;
3174 mcp->tov = MBX_TOV_SECONDS;
3175 mcp->flags = 0;
3176 rval = qla2x00_mailbox_command(vha, mcp);
3178 if (rval != QLA_SUCCESS) {
3179 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3180 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3181 } else {
3182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3183 "Done %s.\n", __func__);
3186 return rval;
3190 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3192 int rval;
3193 mbx_cmd_t mc;
3194 mbx_cmd_t *mcp = &mc;
3196 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3197 !IS_QLA27XX(vha->hw))
3198 return QLA_FUNCTION_FAILED;
3200 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3201 "Entered %s.\n", __func__);
3203 mcp->mb[0] = MBC_READ_SERDES;
3204 mcp->mb[1] = addr;
3205 mcp->mb[3] = 0;
3206 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3207 mcp->in_mb = MBX_1|MBX_0;
3208 mcp->tov = MBX_TOV_SECONDS;
3209 mcp->flags = 0;
3210 rval = qla2x00_mailbox_command(vha, mcp);
3212 if (IS_QLA2031(vha->hw))
3213 *data = mcp->mb[1] & 0xff;
3214 else
3215 *data = mcp->mb[1];
3217 if (rval != QLA_SUCCESS) {
3218 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3219 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3220 } else {
3221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3222 "Done %s.\n", __func__);
3225 return rval;
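/*
 * The serdes accessors above are register-width helpers; a read-modify-write
 * sequence (sketch only, the register offset and bit are placeholders)
 * looks like:
 *
 *	uint16_t val;
 *
 *	if (qla2x00_read_serdes_word(vha, 0x10, &val) == QLA_SUCCESS) {
 *		val |= BIT_0;
 *		qla2x00_write_serdes_word(vha, 0x10, val);
 *	}
 *
 * Note that on ISP2031 parts only the low byte is significant, as handled
 * above.
 */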
3229 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3231 int rval;
3232 mbx_cmd_t mc;
3233 mbx_cmd_t *mcp = &mc;
3235 if (!IS_QLA8044(vha->hw))
3236 return QLA_FUNCTION_FAILED;
3238 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3239 "Entered %s.\n", __func__);
3241 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3242 mcp->mb[1] = HCS_WRITE_SERDES;
3243 mcp->mb[3] = LSW(addr);
3244 mcp->mb[4] = MSW(addr);
3245 mcp->mb[5] = LSW(data);
3246 mcp->mb[6] = MSW(data);
3247 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3248 mcp->in_mb = MBX_0;
3249 mcp->tov = MBX_TOV_SECONDS;
3250 mcp->flags = 0;
3251 rval = qla2x00_mailbox_command(vha, mcp);
3253 if (rval != QLA_SUCCESS) {
3254 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3255 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3256 } else {
3257 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3258 "Done %s.\n", __func__);
3261 return rval;
3265 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3267 int rval;
3268 mbx_cmd_t mc;
3269 mbx_cmd_t *mcp = &mc;
3271 if (!IS_QLA8044(vha->hw))
3272 return QLA_FUNCTION_FAILED;
3274 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3275 "Entered %s.\n", __func__);
3277 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3278 mcp->mb[1] = HCS_READ_SERDES;
3279 mcp->mb[3] = LSW(addr);
3280 mcp->mb[4] = MSW(addr);
3281 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3282 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3283 mcp->tov = MBX_TOV_SECONDS;
3284 mcp->flags = 0;
3285 rval = qla2x00_mailbox_command(vha, mcp);
3287 *data = mcp->mb[2] << 16 | mcp->mb[1];
3289 if (rval != QLA_SUCCESS) {
3290 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3291 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3292 } else {
3293 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3294 "Done %s.\n", __func__);
3297 return rval;
3301 * qla2x00_set_serdes_params() - Set firmware serdes parameters.
3302 * @vha: HA context
3304 * Returns qla2x00 local function return status code.
3307 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3308 uint16_t sw_em_2g, uint16_t sw_em_4g)
3310 int rval;
3311 mbx_cmd_t mc;
3312 mbx_cmd_t *mcp = &mc;
3314 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3315 "Entered %s.\n", __func__);
3317 mcp->mb[0] = MBC_SERDES_PARAMS;
3318 mcp->mb[1] = BIT_0;
3319 mcp->mb[2] = sw_em_1g | BIT_15;
3320 mcp->mb[3] = sw_em_2g | BIT_15;
3321 mcp->mb[4] = sw_em_4g | BIT_15;
3322 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3323 mcp->in_mb = MBX_0;
3324 mcp->tov = MBX_TOV_SECONDS;
3325 mcp->flags = 0;
3326 rval = qla2x00_mailbox_command(vha, mcp);
3328 if (rval != QLA_SUCCESS) {
3329 /*EMPTY*/
3330 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3331 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3332 } else {
3333 /*EMPTY*/
3334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3335 "Done %s.\n", __func__);
3338 return rval;
3342 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3344 int rval;
3345 mbx_cmd_t mc;
3346 mbx_cmd_t *mcp = &mc;
3348 if (!IS_FWI2_CAPABLE(vha->hw))
3349 return QLA_FUNCTION_FAILED;
3351 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3352 "Entered %s.\n", __func__);
3354 mcp->mb[0] = MBC_STOP_FIRMWARE;
3355 mcp->mb[1] = 0;
3356 mcp->out_mb = MBX_1|MBX_0;
3357 mcp->in_mb = MBX_0;
3358 mcp->tov = 5;
3359 mcp->flags = 0;
3360 rval = qla2x00_mailbox_command(vha, mcp);
3362 if (rval != QLA_SUCCESS) {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3364 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3365 rval = QLA_INVALID_COMMAND;
3366 } else {
3367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3368 "Done %s.\n", __func__);
3371 return rval;
3375 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3376 uint16_t buffers)
3378 int rval;
3379 mbx_cmd_t mc;
3380 mbx_cmd_t *mcp = &mc;
3382 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3383 "Entered %s.\n", __func__);
3385 if (!IS_FWI2_CAPABLE(vha->hw))
3386 return QLA_FUNCTION_FAILED;
3388 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3389 return QLA_FUNCTION_FAILED;
3391 mcp->mb[0] = MBC_TRACE_CONTROL;
3392 mcp->mb[1] = TC_EFT_ENABLE;
3393 mcp->mb[2] = LSW(eft_dma);
3394 mcp->mb[3] = MSW(eft_dma);
3395 mcp->mb[4] = LSW(MSD(eft_dma));
3396 mcp->mb[5] = MSW(MSD(eft_dma));
3397 mcp->mb[6] = buffers;
3398 mcp->mb[7] = TC_AEN_DISABLE;
3399 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3400 mcp->in_mb = MBX_1|MBX_0;
3401 mcp->tov = MBX_TOV_SECONDS;
3402 mcp->flags = 0;
3403 rval = qla2x00_mailbox_command(vha, mcp);
3404 if (rval != QLA_SUCCESS) {
3405 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3406 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3407 rval, mcp->mb[0], mcp->mb[1]);
3408 } else {
3409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3410 "Done %s.\n", __func__);
3413 return rval;
3417 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3419 int rval;
3420 mbx_cmd_t mc;
3421 mbx_cmd_t *mcp = &mc;
3423 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3424 "Entered %s.\n", __func__);
3426 if (!IS_FWI2_CAPABLE(vha->hw))
3427 return QLA_FUNCTION_FAILED;
3429 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3430 return QLA_FUNCTION_FAILED;
3432 mcp->mb[0] = MBC_TRACE_CONTROL;
3433 mcp->mb[1] = TC_EFT_DISABLE;
3434 mcp->out_mb = MBX_1|MBX_0;
3435 mcp->in_mb = MBX_1|MBX_0;
3436 mcp->tov = MBX_TOV_SECONDS;
3437 mcp->flags = 0;
3438 rval = qla2x00_mailbox_command(vha, mcp);
3439 if (rval != QLA_SUCCESS) {
3440 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3441 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3442 rval, mcp->mb[0], mcp->mb[1]);
3443 } else {
3444 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3445 "Done %s.\n", __func__);
3448 return rval;
3452 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3453 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3455 int rval;
3456 mbx_cmd_t mc;
3457 mbx_cmd_t *mcp = &mc;
3459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3460 "Entered %s.\n", __func__);
3462 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3463 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3464 return QLA_FUNCTION_FAILED;
3466 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3467 return QLA_FUNCTION_FAILED;
3469 mcp->mb[0] = MBC_TRACE_CONTROL;
3470 mcp->mb[1] = TC_FCE_ENABLE;
3471 mcp->mb[2] = LSW(fce_dma);
3472 mcp->mb[3] = MSW(fce_dma);
3473 mcp->mb[4] = LSW(MSD(fce_dma));
3474 mcp->mb[5] = MSW(MSD(fce_dma));
3475 mcp->mb[6] = buffers;
3476 mcp->mb[7] = TC_AEN_DISABLE;
3477 mcp->mb[8] = 0;
3478 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3479 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3480 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3481 MBX_1|MBX_0;
3482 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3483 mcp->tov = MBX_TOV_SECONDS;
3484 mcp->flags = 0;
3485 rval = qla2x00_mailbox_command(vha, mcp);
3486 if (rval != QLA_SUCCESS) {
3487 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3488 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3489 rval, mcp->mb[0], mcp->mb[1]);
3490 } else {
3491 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3492 "Done %s.\n", __func__);
3494 if (mb)
3495 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3496 if (dwords)
3497 *dwords = buffers;
3500 return rval;
3504 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3506 int rval;
3507 mbx_cmd_t mc;
3508 mbx_cmd_t *mcp = &mc;
3510 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3511 "Entered %s.\n", __func__);
3513 if (!IS_FWI2_CAPABLE(vha->hw))
3514 return QLA_FUNCTION_FAILED;
3516 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3517 return QLA_FUNCTION_FAILED;
3519 mcp->mb[0] = MBC_TRACE_CONTROL;
3520 mcp->mb[1] = TC_FCE_DISABLE;
3521 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3522 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3523 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3524 MBX_1|MBX_0;
3525 mcp->tov = MBX_TOV_SECONDS;
3526 mcp->flags = 0;
3527 rval = qla2x00_mailbox_command(vha, mcp);
3528 if (rval != QLA_SUCCESS) {
3529 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3530 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3531 rval, mcp->mb[0], mcp->mb[1]);
3532 } else {
3533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3534 "Done %s.\n", __func__);
3536 if (wr)
3537 *wr = (uint64_t) mcp->mb[5] << 48 |
3538 (uint64_t) mcp->mb[4] << 32 |
3539 (uint64_t) mcp->mb[3] << 16 |
3540 (uint64_t) mcp->mb[2];
3541 if (rd)
3542 *rd = (uint64_t) mcp->mb[9] << 48 |
3543 (uint64_t) mcp->mb[8] << 32 |
3544 (uint64_t) mcp->mb[7] << 16 |
3545 (uint64_t) mcp->mb[6];
3548 return rval;
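/*
 * FCE trace lifecycle sketch (the buffer variables and count below are
 * assumptions; the driver sizes the FCE buffer at firmware-dump setup
 * time):
 *
 *	enable:  qla2x00_enable_fce_trace(vha, fce_dma, fce_num_buffers,
 *	             fce_mb, &fce_bufs);
 *	disable: qla2x00_disable_fce_trace(vha, &fce_wr, &fce_rd);
 *
 * The write/read pointers returned by the disable call tell the dump code
 * how much of the circular FCE buffer the firmware actually filled.
 */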
3552 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3553 uint16_t *port_speed, uint16_t *mb)
3555 int rval;
3556 mbx_cmd_t mc;
3557 mbx_cmd_t *mcp = &mc;
3559 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3560 "Entered %s.\n", __func__);
3562 if (!IS_IIDMA_CAPABLE(vha->hw))
3563 return QLA_FUNCTION_FAILED;
3565 mcp->mb[0] = MBC_PORT_PARAMS;
3566 mcp->mb[1] = loop_id;
3567 mcp->mb[2] = mcp->mb[3] = 0;
3568 mcp->mb[9] = vha->vp_idx;
3569 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3570 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3571 mcp->tov = MBX_TOV_SECONDS;
3572 mcp->flags = 0;
3573 rval = qla2x00_mailbox_command(vha, mcp);
3575 /* Return mailbox statuses. */
3576 if (mb != NULL) {
3577 mb[0] = mcp->mb[0];
3578 mb[1] = mcp->mb[1];
3579 mb[3] = mcp->mb[3];
3582 if (rval != QLA_SUCCESS) {
3583 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3584 } else {
3585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3586 "Done %s.\n", __func__);
3587 if (port_speed)
3588 *port_speed = mcp->mb[3];
3591 return rval;
3595 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3596 uint16_t port_speed, uint16_t *mb)
3598 int rval;
3599 mbx_cmd_t mc;
3600 mbx_cmd_t *mcp = &mc;
3602 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3603 "Entered %s.\n", __func__);
3605 if (!IS_IIDMA_CAPABLE(vha->hw))
3606 return QLA_FUNCTION_FAILED;
3608 mcp->mb[0] = MBC_PORT_PARAMS;
3609 mcp->mb[1] = loop_id;
3610 mcp->mb[2] = BIT_0;
3611 if (IS_CNA_CAPABLE(vha->hw))
3612 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3613 else
3614 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3615 mcp->mb[9] = vha->vp_idx;
3616 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3617 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3618 mcp->tov = MBX_TOV_SECONDS;
3619 mcp->flags = 0;
3620 rval = qla2x00_mailbox_command(vha, mcp);
3622 /* Return mailbox statuses. */
3623 if (mb != NULL) {
3624 mb[0] = mcp->mb[0];
3625 mb[1] = mcp->mb[1];
3626 mb[3] = mcp->mb[3];
3629 if (rval != QLA_SUCCESS) {
3630 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3631 "Failed=%x.\n", rval);
3632 } else {
3633 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3634 "Done %s.\n", __func__);
3637 return rval;
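/*
 * iIDMA usage sketch: after fabric login the driver typically looks up the
 * remote port's supported speed and then caps the link rate for that
 * N_Port handle (fp_speed below is whatever was retrieved for the port;
 * mb[] is optional):
 *
 *	uint16_t mb[MAILBOX_REGISTER_COUNT];
 *
 *	if (IS_IIDMA_CAPABLE(vha->hw))
 *		qla2x00_set_idma_speed(vha, fcport->loop_id,
 *		    fcport->fp_speed, mb);
 */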
3640 void
3641 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3642 struct vp_rpt_id_entry_24xx *rptid_entry)
3644 struct qla_hw_data *ha = vha->hw;
3645 scsi_qla_host_t *vp = NULL;
3646 unsigned long flags;
3647 int found;
3648 port_id_t id;
3650 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3651 "Entered %s.\n", __func__);
3653 if (rptid_entry->entry_status != 0)
3654 return;
3656 id.b.domain = rptid_entry->port_id[2];
3657 id.b.area = rptid_entry->port_id[1];
3658 id.b.al_pa = rptid_entry->port_id[0];
3659 id.b.rsvd_1 = 0;
3661 if (rptid_entry->format == 0) {
3662 /* loop */
3663 ql_dbg(ql_dbg_async, vha, 0x10b7,
3664 "Format 0 : Number of VPs setup %d, number of "
3665 "VPs acquired %d.\n", rptid_entry->vp_setup,
3666 rptid_entry->vp_acquired);
3667 ql_dbg(ql_dbg_async, vha, 0x10b8,
3668 "Primary port id %02x%02x%02x.\n",
3669 rptid_entry->port_id[2], rptid_entry->port_id[1],
3670 rptid_entry->port_id[0]);
3672 qlt_update_host_map(vha, id);
3674 } else if (rptid_entry->format == 1) {
3675 /* fabric */
3676 ql_dbg(ql_dbg_async, vha, 0x10b9,
3677 "Format 1: VP[%d] enabled - status %d - with "
3678 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3679 rptid_entry->vp_status,
3680 rptid_entry->port_id[2], rptid_entry->port_id[1],
3681 rptid_entry->port_id[0]);
3683 /* buffer to buffer credit flag */
3684 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3686 if (rptid_entry->vp_idx == 0) {
3687 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3688 /* FA-WWN is only for physical port */
3689 if (qla_ini_mode_enabled(vha) &&
3690 ha->flags.fawwpn_enabled &&
3691 (rptid_entry->u.f1.flags &
3692 VP_FLAGS_NAME_VALID)) {
3693 memcpy(vha->port_name,
3694 rptid_entry->u.f1.port_name,
3695 WWN_SIZE);
3698 qlt_update_host_map(vha, id);
3701 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3702 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3703 } else {
3704 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3705 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3706 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3707 "Could not acquire ID for VP[%d].\n",
3708 rptid_entry->vp_idx);
3709 return;
3712 found = 0;
3713 spin_lock_irqsave(&ha->vport_slock, flags);
3714 list_for_each_entry(vp, &ha->vp_list, list) {
3715 if (rptid_entry->vp_idx == vp->vp_idx) {
3716 found = 1;
3717 break;
3720 spin_unlock_irqrestore(&ha->vport_slock, flags);
3722 if (!found)
3723 return;
3725 qlt_update_host_map(vp, id);
3727 /*
3728 * Cannot configure here as we are still sitting on the
3729 * response queue. Handle it in dpc context.
3730 */
3731 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3732 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3733 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3735 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3736 qla2xxx_wake_dpc(vha);
3737 } else if (rptid_entry->format == 2) {
3738 ql_dbg(ql_dbg_async, vha, 0x505f,
3739 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3740 rptid_entry->port_id[2], rptid_entry->port_id[1],
3741 rptid_entry->port_id[0]);
3743 ql_dbg(ql_dbg_async, vha, 0x5075,
3744 "N2N: Remote WWPN %8phC.\n",
3745 rptid_entry->u.f2.port_name);
3747 /* N2N. direct connect */
3748 vha->d_id.b.domain = rptid_entry->port_id[2];
3749 vha->d_id.b.area = rptid_entry->port_id[1];
3750 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3752 spin_lock_irqsave(&ha->vport_slock, flags);
3753 qlt_update_vp_map(vha, SET_AL_PA);
3754 spin_unlock_irqrestore(&ha->vport_slock, flags);
3759 * qla24xx_modify_vp_config
3760 * Change VP configuration for vha
3762 * Input:
3763 * vha = adapter block pointer.
3765 * Returns:
3766 * qla2xxx local function return status code.
3768 * Context:
3769 * Kernel context.
3772 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3774 int rval;
3775 struct vp_config_entry_24xx *vpmod;
3776 dma_addr_t vpmod_dma;
3777 struct qla_hw_data *ha = vha->hw;
3778 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3780 /* This can be called by the parent */
3782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3783 "Entered %s.\n", __func__);
3785 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3786 if (!vpmod) {
3787 ql_log(ql_log_warn, vha, 0x10bc,
3788 "Failed to allocate modify VP IOCB.\n");
3789 return QLA_MEMORY_ALLOC_FAILED;
3792 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
3793 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3794 vpmod->entry_count = 1;
3795 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3796 vpmod->vp_count = 1;
3797 vpmod->vp_index1 = vha->vp_idx;
3798 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3800 qlt_modify_vp_config(vha, vpmod);
3802 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3803 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3804 vpmod->entry_count = 1;
3806 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3807 if (rval != QLA_SUCCESS) {
3808 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3809 "Failed to issue VP config IOCB (%x).\n", rval);
3810 } else if (vpmod->entry_status != 0) {
3811 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3812 "Failed to complete IOCB -- error status (%x).\n",
3813 vpmod->entry_status);
3814 rval = QLA_FUNCTION_FAILED;
3815 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3816 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3817 "Failed to complete IOCB -- completion status (%x).\n",
3818 le16_to_cpu(vpmod->comp_status));
3819 rval = QLA_FUNCTION_FAILED;
3820 } else {
3821 /* EMPTY */
3822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3823 "Done %s.\n", __func__);
3824 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3826 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3828 return rval;
3832 * qla24xx_control_vp
3833 * Enable or disable a virtual port for the given host
3835 * Input:
3836 * vha = virtual adapter block pointer.
3837 * cmd = VP control command.
3840 * Returns:
3841 * qla2xxx local function return status code.
3843 * Context:
3844 * Kernel context.
3847 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3849 int rval;
3850 int map, pos;
3851 struct vp_ctrl_entry_24xx *vce;
3852 dma_addr_t vce_dma;
3853 struct qla_hw_data *ha = vha->hw;
3854 int vp_index = vha->vp_idx;
3855 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3857 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3858 "Entered %s enabling index %d.\n", __func__, vp_index);
3860 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3861 return QLA_PARAMETER_ERROR;
3863 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3864 if (!vce) {
3865 ql_log(ql_log_warn, vha, 0x10c2,
3866 "Failed to allocate VP control IOCB.\n");
3867 return QLA_MEMORY_ALLOC_FAILED;
3869 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3871 vce->entry_type = VP_CTRL_IOCB_TYPE;
3872 vce->entry_count = 1;
3873 vce->command = cpu_to_le16(cmd);
3874 vce->vp_count = cpu_to_le16(1);
3876 /* index map in firmware starts with 1; decrement index
3877 * this is ok as we never use index 0
3878 */
3879 map = (vp_index - 1) / 8;
3880 pos = (vp_index - 1) & 7;
3881 mutex_lock(&ha->vport_lock);
3882 vce->vp_idx_map[map] |= 1 << pos;
3883 mutex_unlock(&ha->vport_lock);
3885 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3886 if (rval != QLA_SUCCESS) {
3887 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3888 "Failed to issue VP control IOCB (%x).\n", rval);
3889 } else if (vce->entry_status != 0) {
3890 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3891 "Failed to complete IOCB -- error status (%x).\n",
3892 vce->entry_status);
3893 rval = QLA_FUNCTION_FAILED;
3894 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
3895 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3896 "Failed to complete IOCB -- completion status (%x).\n",
3897 le16_to_cpu(vce->comp_status));
3898 rval = QLA_FUNCTION_FAILED;
3899 } else {
3900 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3901 "Done %s.\n", __func__);
3904 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3906 return rval;
3910 * qla2x00_send_change_request
3911 * Receive or disable RSCN request from fabric controller
3913 * Input:
3914 * ha = adapter block pointer
3915 * format = registration format:
3916 * 0 - Reserved
3917 * 1 - Fabric detected registration
3918 * 2 - N_port detected registration
3919 * 3 - Full registration
3920 * FF - clear registration
3921 * vp_idx = Virtual port index
3923 * Returns:
3924 * qla2x00 local function return status code.
3926 * Context:
3927 * Kernel Context
3931 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3932 uint16_t vp_idx)
3934 int rval;
3935 mbx_cmd_t mc;
3936 mbx_cmd_t *mcp = &mc;
3938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3939 "Entered %s.\n", __func__);
3941 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3942 mcp->mb[1] = format;
3943 mcp->mb[9] = vp_idx;
3944 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3945 mcp->in_mb = MBX_0|MBX_1;
3946 mcp->tov = MBX_TOV_SECONDS;
3947 mcp->flags = 0;
3948 rval = qla2x00_mailbox_command(vha, mcp);
3950 if (rval == QLA_SUCCESS) {
3951 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3952 rval = BIT_1;
3954 } else
3955 rval = BIT_1;
3957 return rval;
3961 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3962 uint32_t size)
3964 int rval;
3965 mbx_cmd_t mc;
3966 mbx_cmd_t *mcp = &mc;
3968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3969 "Entered %s.\n", __func__);
3971 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3972 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3973 mcp->mb[8] = MSW(addr);
3974 mcp->out_mb = MBX_8|MBX_0;
3975 } else {
3976 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3977 mcp->out_mb = MBX_0;
3979 mcp->mb[1] = LSW(addr);
3980 mcp->mb[2] = MSW(req_dma);
3981 mcp->mb[3] = LSW(req_dma);
3982 mcp->mb[6] = MSW(MSD(req_dma));
3983 mcp->mb[7] = LSW(MSD(req_dma));
3984 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3985 if (IS_FWI2_CAPABLE(vha->hw)) {
3986 mcp->mb[4] = MSW(size);
3987 mcp->mb[5] = LSW(size);
3988 mcp->out_mb |= MBX_5|MBX_4;
3989 } else {
3990 mcp->mb[4] = LSW(size);
3991 mcp->out_mb |= MBX_4;
3994 mcp->in_mb = MBX_0;
3995 mcp->tov = MBX_TOV_SECONDS;
3996 mcp->flags = 0;
3997 rval = qla2x00_mailbox_command(vha, mcp);
3999 if (rval != QLA_SUCCESS) {
4000 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4001 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4002 } else {
4003 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4004 "Done %s.\n", __func__);
4007 return rval;
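/*
 * Usage sketch for qla2x00_dump_ram() (placeholders throughout; the unit
 * of 'size' is defined by the DUMP RISC RAM mailbox interface programmed
 * above, and the DMA buffer must be sized to match):
 *
 *	void *buf;
 *	dma_addr_t buf_dma;
 *
 *	buf = dma_alloc_coherent(&ha->pdev->dev, buf_len, &buf_dma,
 *	    GFP_KERNEL);
 *	if (buf &&
 *	    qla2x00_dump_ram(vha, buf_dma, risc_addr, size) == QLA_SUCCESS)
 *		... 'buf' now holds a copy of RISC RAM starting at
 *		    risc_addr ...
 */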
4009 /* 84XX Support **************************************************************/
4011 struct cs84xx_mgmt_cmd {
4012 union {
4013 struct verify_chip_entry_84xx req;
4014 struct verify_chip_rsp_84xx rsp;
4015 } p;
4019 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4021 int rval, retry;
4022 struct cs84xx_mgmt_cmd *mn;
4023 dma_addr_t mn_dma;
4024 uint16_t options;
4025 unsigned long flags;
4026 struct qla_hw_data *ha = vha->hw;
4028 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4029 "Entered %s.\n", __func__);
4031 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4032 if (mn == NULL) {
4033 return QLA_MEMORY_ALLOC_FAILED;
4036 /* Force Update? */
4037 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4038 /* Diagnostic firmware? */
4039 /* options |= MENLO_DIAG_FW; */
4040 /* We update the firmware with only one data sequence. */
4041 options |= VCO_END_OF_DATA;
4043 do {
4044 retry = 0;
4045 memset(mn, 0, sizeof(*mn));
4046 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4047 mn->p.req.entry_count = 1;
4048 mn->p.req.options = cpu_to_le16(options);
4050 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4051 "Dump of Verify Request.\n");
4052 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4053 (uint8_t *)mn, sizeof(*mn));
4055 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4056 if (rval != QLA_SUCCESS) {
4057 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4058 "Failed to issue verify IOCB (%x).\n", rval);
4059 goto verify_done;
4062 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4063 "Dump of Verify Response.\n");
4064 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4065 (uint8_t *)mn, sizeof(*mn));
4067 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4068 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4069 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4071 "cs=%x fc=%x.\n", status[0], status[1]);
4073 if (status[0] != CS_COMPLETE) {
4074 rval = QLA_FUNCTION_FAILED;
4075 if (!(options & VCO_DONT_UPDATE_FW)) {
4076 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4077 "Firmware update failed. Retrying "
4078 "without update firmware.\n");
4079 options |= VCO_DONT_UPDATE_FW;
4080 options &= ~VCO_FORCE_UPDATE;
4081 retry = 1;
4083 } else {
4084 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4085 "Firmware updated to %x.\n",
4086 le32_to_cpu(mn->p.rsp.fw_ver));
4088 /* NOTE: we only update OP firmware. */
4089 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4090 ha->cs84xx->op_fw_version =
4091 le32_to_cpu(mn->p.rsp.fw_ver);
4092 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4093 flags);
4095 } while (retry);
4097 verify_done:
4098 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4100 if (rval != QLA_SUCCESS) {
4101 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4102 "Failed=%x.\n", rval);
4103 } else {
4104 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4105 "Done %s.\n", __func__);
4108 return rval;
4112 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4114 int rval;
4115 unsigned long flags;
4116 mbx_cmd_t mc;
4117 mbx_cmd_t *mcp = &mc;
4118 struct qla_hw_data *ha = vha->hw;
4120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4121 "Entered %s.\n", __func__);
4123 if (IS_SHADOW_REG_CAPABLE(ha))
4124 req->options |= BIT_13;
4126 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4127 mcp->mb[1] = req->options;
4128 mcp->mb[2] = MSW(LSD(req->dma));
4129 mcp->mb[3] = LSW(LSD(req->dma));
4130 mcp->mb[6] = MSW(MSD(req->dma));
4131 mcp->mb[7] = LSW(MSD(req->dma));
4132 mcp->mb[5] = req->length;
4133 if (req->rsp)
4134 mcp->mb[10] = req->rsp->id;
4135 mcp->mb[12] = req->qos;
4136 mcp->mb[11] = req->vp_idx;
4137 mcp->mb[13] = req->rid;
4138 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4139 mcp->mb[15] = 0;
4141 mcp->mb[4] = req->id;
4142 /* que in ptr index */
4143 mcp->mb[8] = 0;
4144 /* que out ptr index */
4145 mcp->mb[9] = *req->out_ptr = 0;
4146 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4147 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4148 mcp->in_mb = MBX_0;
4149 mcp->flags = MBX_DMA_OUT;
4150 mcp->tov = MBX_TOV_SECONDS * 2;
4152 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4153 mcp->in_mb |= MBX_1;
4154 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4155 mcp->out_mb |= MBX_15;
4156 /* debug q create issue in SR-IOV */
4157 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4160 spin_lock_irqsave(&ha->hardware_lock, flags);
4161 if (!(req->options & BIT_0)) {
4162 WRT_REG_DWORD(req->req_q_in, 0);
4163 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4164 WRT_REG_DWORD(req->req_q_out, 0);
4166 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4168 rval = qla2x00_mailbox_command(vha, mcp);
4169 if (rval != QLA_SUCCESS) {
4170 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4171 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4172 } else {
4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4174 "Done %s.\n", __func__);
4177 return rval;
4181 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4183 int rval;
4184 unsigned long flags;
4185 mbx_cmd_t mc;
4186 mbx_cmd_t *mcp = &mc;
4187 struct qla_hw_data *ha = vha->hw;
4189 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4190 "Entered %s.\n", __func__);
4192 if (IS_SHADOW_REG_CAPABLE(ha))
4193 rsp->options |= BIT_13;
4195 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4196 mcp->mb[1] = rsp->options;
4197 mcp->mb[2] = MSW(LSD(rsp->dma));
4198 mcp->mb[3] = LSW(LSD(rsp->dma));
4199 mcp->mb[6] = MSW(MSD(rsp->dma));
4200 mcp->mb[7] = LSW(MSD(rsp->dma));
4201 mcp->mb[5] = rsp->length;
4202 mcp->mb[14] = rsp->msix->entry;
4203 mcp->mb[13] = rsp->rid;
4204 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4205 mcp->mb[15] = 0;
4207 mcp->mb[4] = rsp->id;
4208 /* que in ptr index */
4209 mcp->mb[8] = *rsp->in_ptr = 0;
4210 /* que out ptr index */
4211 mcp->mb[9] = 0;
4212 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4213 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4214 mcp->in_mb = MBX_0;
4215 mcp->flags = MBX_DMA_OUT;
4216 mcp->tov = MBX_TOV_SECONDS * 2;
4218 if (IS_QLA81XX(ha)) {
4219 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4220 mcp->in_mb |= MBX_1;
4221 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4222 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4223 mcp->in_mb |= MBX_1;
4224 /* debug q create issue in SR-IOV */
4225 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4228 spin_lock_irqsave(&ha->hardware_lock, flags);
4229 if (!(rsp->options & BIT_0)) {
4230 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4231 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4232 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4235 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4237 rval = qla2x00_mailbox_command(vha, mcp);
4238 if (rval != QLA_SUCCESS) {
4239 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4240 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4241 } else {
4242 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4243 "Done %s.\n", __func__);
4246 return rval;
4247 }
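/*
 * qla81xx_idc_ack
 *	Acknowledge an Inter-Driver Communication (IDC) request by
 *	echoing the supplied mailbox values back to the firmware with
 *	the MBC_IDC_ACK command.
 */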
4249 int
4250 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4251 {
4252 int rval;
4253 mbx_cmd_t mc;
4254 mbx_cmd_t *mcp = &mc;
4256 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4257 "Entered %s.\n", __func__);
4259 mcp->mb[0] = MBC_IDC_ACK;
4260 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4261 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4262 mcp->in_mb = MBX_0;
4263 mcp->tov = MBX_TOV_SECONDS;
4264 mcp->flags = 0;
4265 rval = qla2x00_mailbox_command(vha, mcp);
4267 if (rval != QLA_SUCCESS) {
4268 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4269 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4270 } else {
4271 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4272 "Done %s.\n", __func__);
4275 return rval;
4276 }
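/*
 * qla81xx_fac_get_sector_size
 *	Query the flash sector size through the Flash Access Control
 *	(FAC) mailbox command. Supported on ISP81xx/83xx/27xx only.
 */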
4278 int
4279 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4280 {
4281 int rval;
4282 mbx_cmd_t mc;
4283 mbx_cmd_t *mcp = &mc;
4285 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4286 "Entered %s.\n", __func__);
4288 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4289 !IS_QLA27XX(vha->hw))
4290 return QLA_FUNCTION_FAILED;
4292 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4293 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4294 mcp->out_mb = MBX_1|MBX_0;
4295 mcp->in_mb = MBX_1|MBX_0;
4296 mcp->tov = MBX_TOV_SECONDS;
4297 mcp->flags = 0;
4298 rval = qla2x00_mailbox_command(vha, mcp);
4300 if (rval != QLA_SUCCESS) {
4301 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4302 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4303 rval, mcp->mb[0], mcp->mb[1]);
4304 } else {
4305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4306 "Done %s.\n", __func__);
4307 *sector_size = mcp->mb[1];
4310 return rval;
4314 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4316 int rval;
4317 mbx_cmd_t mc;
4318 mbx_cmd_t *mcp = &mc;
4320 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4321 !IS_QLA27XX(vha->hw))
4322 return QLA_FUNCTION_FAILED;
4324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4325 "Entered %s.\n", __func__);
4327 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4328 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4329 FAC_OPT_CMD_WRITE_PROTECT;
4330 mcp->out_mb = MBX_1|MBX_0;
4331 mcp->in_mb = MBX_1|MBX_0;
4332 mcp->tov = MBX_TOV_SECONDS;
4333 mcp->flags = 0;
4334 rval = qla2x00_mailbox_command(vha, mcp);
4336 if (rval != QLA_SUCCESS) {
4337 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4338 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4339 rval, mcp->mb[0], mcp->mb[1]);
4340 } else {
4341 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4342 "Done %s.\n", __func__);
4345 return rval;
4349 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4351 int rval;
4352 mbx_cmd_t mc;
4353 mbx_cmd_t *mcp = &mc;
4355 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4356 !IS_QLA27XX(vha->hw))
4357 return QLA_FUNCTION_FAILED;
4359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4360 "Entered %s.\n", __func__);
4362 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4363 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4364 mcp->mb[2] = LSW(start);
4365 mcp->mb[3] = MSW(start);
4366 mcp->mb[4] = LSW(finish);
4367 mcp->mb[5] = MSW(finish);
4368 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4369 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4370 mcp->tov = MBX_TOV_SECONDS;
4371 mcp->flags = 0;
4372 rval = qla2x00_mailbox_command(vha, mcp);
4374 if (rval != QLA_SUCCESS) {
4375 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4376 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4377 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4378 } else {
4379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4380 "Done %s.\n", __func__);
4383 return rval;
4387 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4389 int rval = 0;
4390 mbx_cmd_t mc;
4391 mbx_cmd_t *mcp = &mc;
4393 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4394 "Entered %s.\n", __func__);
4396 mcp->mb[0] = MBC_RESTART_MPI_FW;
4397 mcp->out_mb = MBX_0;
4398 mcp->in_mb = MBX_0|MBX_1;
4399 mcp->tov = MBX_TOV_SECONDS;
4400 mcp->flags = 0;
4401 rval = qla2x00_mailbox_command(vha, mcp);
4403 if (rval != QLA_SUCCESS) {
4404 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4405 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4406 rval, mcp->mb[0], mcp->mb[1]);
4407 } else {
4408 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4409 "Done %s.\n", __func__);
4412 return rval;
4416 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4418 int rval;
4419 mbx_cmd_t mc;
4420 mbx_cmd_t *mcp = &mc;
4421 int i;
4422 int len;
4423 uint16_t *str;
4424 struct qla_hw_data *ha = vha->hw;
4426 if (!IS_P3P_TYPE(ha))
4427 return QLA_FUNCTION_FAILED;
4429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4430 "Entered %s.\n", __func__);
4432 str = (void *)version;
4433 len = strlen(version);
4435 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4436 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4437 mcp->out_mb = MBX_1|MBX_0;
4438 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4439 mcp->mb[i] = cpu_to_le16p(str);
4440 mcp->out_mb |= 1<<i;
4442 for (; i < 16; i++) {
4443 mcp->mb[i] = 0;
4444 mcp->out_mb |= 1<<i;
4446 mcp->in_mb = MBX_1|MBX_0;
4447 mcp->tov = MBX_TOV_SECONDS;
4448 mcp->flags = 0;
4449 rval = qla2x00_mailbox_command(vha, mcp);
4451 if (rval != QLA_SUCCESS) {
4452 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4453 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4454 } else {
4455 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4456 "Done %s.\n", __func__);
4459 return rval;
4463 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4465 int rval;
4466 mbx_cmd_t mc;
4467 mbx_cmd_t *mcp = &mc;
4468 int len;
4469 uint16_t dwlen;
4470 uint8_t *str;
4471 dma_addr_t str_dma;
4472 struct qla_hw_data *ha = vha->hw;
4474 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4475 IS_P3P_TYPE(ha))
4476 return QLA_FUNCTION_FAILED;
4478 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4479 "Entered %s.\n", __func__);
4481 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4482 if (!str) {
4483 ql_log(ql_log_warn, vha, 0x117f,
4484 "Failed to allocate driver version param.\n");
4485 return QLA_MEMORY_ALLOC_FAILED;
4488 memcpy(str, "\x7\x3\x11\x0", 4);
4489 dwlen = str[0];
4490 len = dwlen * 4 - 4;
4491 memset(str + 4, 0, len);
4492 if (len > strlen(version))
4493 len = strlen(version);
4494 memcpy(str + 4, version, len);
4496 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4497 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4498 mcp->mb[2] = MSW(LSD(str_dma));
4499 mcp->mb[3] = LSW(LSD(str_dma));
4500 mcp->mb[6] = MSW(MSD(str_dma));
4501 mcp->mb[7] = LSW(MSD(str_dma));
4502 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4503 mcp->in_mb = MBX_1|MBX_0;
4504 mcp->tov = MBX_TOV_SECONDS;
4505 mcp->flags = 0;
4506 rval = qla2x00_mailbox_command(vha, mcp);
4508 if (rval != QLA_SUCCESS) {
4509 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4510 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4511 } else {
4512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4513 "Done %s.\n", __func__);
4516 dma_pool_free(ha->s_dma_pool, str, str_dma);
4518 return rval;
4521 static int
4522 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4524 int rval;
4525 mbx_cmd_t mc;
4526 mbx_cmd_t *mcp = &mc;
4528 if (!IS_FWI2_CAPABLE(vha->hw))
4529 return QLA_FUNCTION_FAILED;
4531 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4532 "Entered %s.\n", __func__);
4534 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4535 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4536 mcp->out_mb = MBX_1|MBX_0;
4537 mcp->in_mb = MBX_1|MBX_0;
4538 mcp->tov = MBX_TOV_SECONDS;
4539 mcp->flags = 0;
4540 rval = qla2x00_mailbox_command(vha, mcp);
4541 *temp = mcp->mb[1];
4543 if (rval != QLA_SUCCESS) {
4544 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4545 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4546 } else {
4547 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4548 "Done %s.\n", __func__);
4551 return rval;
4552 }
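/*
 * qla2x00_read_sfp
 *	Read SFP transceiver data via MBC_READ_SFP. For a single-byte
 *	read (len == 1) the value comes back in mb[1]; otherwise the
 *	data is DMAed to the buffer at sfp_dma.
 */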
4554 int
4555 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4556 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4557 {
4558 int rval;
4559 mbx_cmd_t mc;
4560 mbx_cmd_t *mcp = &mc;
4561 struct qla_hw_data *ha = vha->hw;
4563 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4564 "Entered %s.\n", __func__);
4566 if (!IS_FWI2_CAPABLE(ha))
4567 return QLA_FUNCTION_FAILED;
4569 if (len == 1)
4570 opt |= BIT_0;
4572 mcp->mb[0] = MBC_READ_SFP;
4573 mcp->mb[1] = dev;
4574 mcp->mb[2] = MSW(sfp_dma);
4575 mcp->mb[3] = LSW(sfp_dma);
4576 mcp->mb[6] = MSW(MSD(sfp_dma));
4577 mcp->mb[7] = LSW(MSD(sfp_dma));
4578 mcp->mb[8] = len;
4579 mcp->mb[9] = off;
4580 mcp->mb[10] = opt;
4581 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4582 mcp->in_mb = MBX_1|MBX_0;
4583 mcp->tov = MBX_TOV_SECONDS;
4584 mcp->flags = 0;
4585 rval = qla2x00_mailbox_command(vha, mcp);
4587 if (opt & BIT_0)
4588 *sfp = mcp->mb[1];
4590 if (rval != QLA_SUCCESS) {
4591 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4592 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4593 } else {
4594 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4595 "Done %s.\n", __func__);
4598 return rval;
4602 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4603 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4605 int rval;
4606 mbx_cmd_t mc;
4607 mbx_cmd_t *mcp = &mc;
4608 struct qla_hw_data *ha = vha->hw;
4610 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4611 "Entered %s.\n", __func__);
4613 if (!IS_FWI2_CAPABLE(ha))
4614 return QLA_FUNCTION_FAILED;
4616 if (len == 1)
4617 opt |= BIT_0;
4619 if (opt & BIT_0)
4620 len = *sfp;
4622 mcp->mb[0] = MBC_WRITE_SFP;
4623 mcp->mb[1] = dev;
4624 mcp->mb[2] = MSW(sfp_dma);
4625 mcp->mb[3] = LSW(sfp_dma);
4626 mcp->mb[6] = MSW(MSD(sfp_dma));
4627 mcp->mb[7] = LSW(MSD(sfp_dma));
4628 mcp->mb[8] = len;
4629 mcp->mb[9] = off;
4630 mcp->mb[10] = opt;
4631 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4632 mcp->in_mb = MBX_1|MBX_0;
4633 mcp->tov = MBX_TOV_SECONDS;
4634 mcp->flags = 0;
4635 rval = qla2x00_mailbox_command(vha, mcp);
4637 if (rval != QLA_SUCCESS) {
4638 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4639 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4640 } else {
4641 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4642 "Done %s.\n", __func__);
4645 return rval;
4649 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4650 uint16_t size_in_bytes, uint16_t *actual_size)
4652 int rval;
4653 mbx_cmd_t mc;
4654 mbx_cmd_t *mcp = &mc;
4656 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4657 "Entered %s.\n", __func__);
4659 if (!IS_CNA_CAPABLE(vha->hw))
4660 return QLA_FUNCTION_FAILED;
4662 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4663 mcp->mb[2] = MSW(stats_dma);
4664 mcp->mb[3] = LSW(stats_dma);
4665 mcp->mb[6] = MSW(MSD(stats_dma));
4666 mcp->mb[7] = LSW(MSD(stats_dma));
4667 mcp->mb[8] = size_in_bytes >> 2;
4668 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4669 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4670 mcp->tov = MBX_TOV_SECONDS;
4671 mcp->flags = 0;
4672 rval = qla2x00_mailbox_command(vha, mcp);
4674 if (rval != QLA_SUCCESS) {
4675 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4676 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4677 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4678 } else {
4679 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4680 "Done %s.\n", __func__);
4683 *actual_size = mcp->mb[2] << 2;
4686 return rval;
4690 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4691 uint16_t size)
4693 int rval;
4694 mbx_cmd_t mc;
4695 mbx_cmd_t *mcp = &mc;
4697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4698 "Entered %s.\n", __func__);
4700 if (!IS_CNA_CAPABLE(vha->hw))
4701 return QLA_FUNCTION_FAILED;
4703 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4704 mcp->mb[1] = 0;
4705 mcp->mb[2] = MSW(tlv_dma);
4706 mcp->mb[3] = LSW(tlv_dma);
4707 mcp->mb[6] = MSW(MSD(tlv_dma));
4708 mcp->mb[7] = LSW(MSD(tlv_dma));
4709 mcp->mb[8] = size;
4710 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4711 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4712 mcp->tov = MBX_TOV_SECONDS;
4713 mcp->flags = 0;
4714 rval = qla2x00_mailbox_command(vha, mcp);
4716 if (rval != QLA_SUCCESS) {
4717 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4718 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4719 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4720 } else {
4721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4722 "Done %s.\n", __func__);
4725 return rval;
4729 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4731 int rval;
4732 mbx_cmd_t mc;
4733 mbx_cmd_t *mcp = &mc;
4735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4736 "Entered %s.\n", __func__);
4738 if (!IS_FWI2_CAPABLE(vha->hw))
4739 return QLA_FUNCTION_FAILED;
4741 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4742 mcp->mb[1] = LSW(risc_addr);
4743 mcp->mb[8] = MSW(risc_addr);
4744 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4745 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4746 mcp->tov = 30;
4747 mcp->flags = 0;
4748 rval = qla2x00_mailbox_command(vha, mcp);
4749 if (rval != QLA_SUCCESS) {
4750 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4751 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4752 } else {
4753 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4754 "Done %s.\n", __func__);
4755 *data = mcp->mb[3] << 16 | mcp->mb[2];
4758 return rval;
4759 }
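/*
 * qla2x00_loopback_test
 *	Run the MBC_DIAGNOSTIC_LOOP_BACK command using the send/receive
 *	buffers, transfer size and iteration count described by @mreq.
 *	The first 32 mailbox registers are copied back to @mresp for the
 *	caller to interpret.
 */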
4761 int
4762 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4763 uint16_t *mresp)
4764 {
4765 int rval;
4766 mbx_cmd_t mc;
4767 mbx_cmd_t *mcp = &mc;
4769 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4770 "Entered %s.\n", __func__);
4772 memset(mcp->mb, 0, sizeof(mcp->mb));
4773 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4774 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
4776 /* transfer count */
4777 mcp->mb[10] = LSW(mreq->transfer_size);
4778 mcp->mb[11] = MSW(mreq->transfer_size);
4780 /* send data address */
4781 mcp->mb[14] = LSW(mreq->send_dma);
4782 mcp->mb[15] = MSW(mreq->send_dma);
4783 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4784 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4786 /* receive data address */
4787 mcp->mb[16] = LSW(mreq->rcv_dma);
4788 mcp->mb[17] = MSW(mreq->rcv_dma);
4789 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4790 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4792 /* Iteration count */
4793 mcp->mb[18] = LSW(mreq->iteration_count);
4794 mcp->mb[19] = MSW(mreq->iteration_count);
4796 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4797 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4798 if (IS_CNA_CAPABLE(vha->hw))
4799 mcp->out_mb |= MBX_2;
4800 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4802 mcp->buf_size = mreq->transfer_size;
4803 mcp->tov = MBX_TOV_SECONDS;
4804 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4806 rval = qla2x00_mailbox_command(vha, mcp);
4808 if (rval != QLA_SUCCESS) {
4809 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4810 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4811 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4812 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4813 } else {
4814 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4815 "Done %s.\n", __func__);
4818 /* Copy mailbox information */
4819 memcpy(mresp, mcp->mb, 64);
4820 return rval;
4821 }
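/*
 * qla2x00_echo_test
 *	Run the MBC_DIAGNOSTIC_ECHO command; as with the loopback test,
 *	the returned mailbox registers are copied back to @mresp.
 */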
4823 int
4824 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4825 uint16_t *mresp)
4826 {
4827 int rval;
4828 mbx_cmd_t mc;
4829 mbx_cmd_t *mcp = &mc;
4830 struct qla_hw_data *ha = vha->hw;
4832 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4833 "Entered %s.\n", __func__);
4835 memset(mcp->mb, 0, sizeof(mcp->mb));
4836 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4837 /* BIT_6 specifies 64bit address */
4838 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4839 if (IS_CNA_CAPABLE(ha)) {
4840 mcp->mb[2] = vha->fcoe_fcf_idx;
4842 mcp->mb[16] = LSW(mreq->rcv_dma);
4843 mcp->mb[17] = MSW(mreq->rcv_dma);
4844 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4845 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4847 mcp->mb[10] = LSW(mreq->transfer_size);
4849 mcp->mb[14] = LSW(mreq->send_dma);
4850 mcp->mb[15] = MSW(mreq->send_dma);
4851 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4852 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4854 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4855 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4856 if (IS_CNA_CAPABLE(ha))
4857 mcp->out_mb |= MBX_2;
4859 mcp->in_mb = MBX_0;
4860 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4861 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4862 mcp->in_mb |= MBX_1;
4863 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4864 mcp->in_mb |= MBX_3;
4866 mcp->tov = MBX_TOV_SECONDS;
4867 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4868 mcp->buf_size = mreq->transfer_size;
4870 rval = qla2x00_mailbox_command(vha, mcp);
4872 if (rval != QLA_SUCCESS) {
4873 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4874 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4875 rval, mcp->mb[0], mcp->mb[1]);
4876 } else {
4877 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4878 "Done %s.\n", __func__);
4881 /* Copy mailbox information */
4882 memcpy(mresp, mcp->mb, 64);
4883 return rval;
4887 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
4889 int rval;
4890 mbx_cmd_t mc;
4891 mbx_cmd_t *mcp = &mc;
4893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
4894 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
4896 mcp->mb[0] = MBC_ISP84XX_RESET;
4897 mcp->mb[1] = enable_diagnostic;
4898 mcp->out_mb = MBX_1|MBX_0;
4899 mcp->in_mb = MBX_1|MBX_0;
4900 mcp->tov = MBX_TOV_SECONDS;
4901 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4902 rval = qla2x00_mailbox_command(vha, mcp);
4904 if (rval != QLA_SUCCESS)
4905 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
4906 else
4907 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4908 "Done %s.\n", __func__);
4910 return rval;
4914 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
4916 int rval;
4917 mbx_cmd_t mc;
4918 mbx_cmd_t *mcp = &mc;
4920 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4921 "Entered %s.\n", __func__);
4923 if (!IS_FWI2_CAPABLE(vha->hw))
4924 return QLA_FUNCTION_FAILED;
4926 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
4927 mcp->mb[1] = LSW(risc_addr);
4928 mcp->mb[2] = LSW(data);
4929 mcp->mb[3] = MSW(data);
4930 mcp->mb[8] = MSW(risc_addr);
4931 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
4932 mcp->in_mb = MBX_0;
4933 mcp->tov = 30;
4934 mcp->flags = 0;
4935 rval = qla2x00_mailbox_command(vha, mcp);
4936 if (rval != QLA_SUCCESS) {
4937 ql_dbg(ql_dbg_mbx, vha, 0x1101,
4938 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4939 } else {
4940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4941 "Done %s.\n", __func__);
4944 return rval;
4945 }
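/*
 * qla81xx_write_mpi_register
 *	Write an MPI register by driving the mailbox registers and the
 *	host interrupt directly, polling for the mailbox completion
 *	instead of going through qla2x00_mailbox_command().
 */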
4947 int
4948 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4949 {
4950 int rval;
4951 uint32_t stat, timer;
4952 uint16_t mb0 = 0;
4953 struct qla_hw_data *ha = vha->hw;
4954 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4956 rval = QLA_SUCCESS;
4958 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4959 "Entered %s.\n", __func__);
4961 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4963 /* Write the MBC data to the registers */
4964 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
4965 WRT_REG_WORD(&reg->mailbox1, mb[0]);
4966 WRT_REG_WORD(&reg->mailbox2, mb[1]);
4967 WRT_REG_WORD(&reg->mailbox3, mb[2]);
4968 WRT_REG_WORD(&reg->mailbox4, mb[3]);
4970 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
4972 /* Poll for MBC interrupt */
4973 for (timer = 6000000; timer; timer--) {
4974 /* Check for pending interrupts. */
4975 stat = RD_REG_DWORD(&reg->host_status);
4976 if (stat & HSRX_RISC_INT) {
4977 stat &= 0xff;
4979 if (stat == 0x1 || stat == 0x2 ||
4980 stat == 0x10 || stat == 0x11) {
4981 set_bit(MBX_INTERRUPT,
4982 &ha->mbx_cmd_flags);
4983 mb0 = RD_REG_WORD(&reg->mailbox0);
4984 WRT_REG_DWORD(&reg->hccr,
4985 HCCRX_CLR_RISC_INT);
4986 RD_REG_DWORD(&reg->hccr);
4987 break;
4990 udelay(5);
4993 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
4994 rval = mb0 & MBS_MASK;
4995 else
4996 rval = QLA_FUNCTION_FAILED;
4998 if (rval != QLA_SUCCESS) {
4999 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5000 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5001 } else {
5002 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5003 "Done %s.\n", __func__);
5006 return rval;
5007 }
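/*
 * qla2x00_get_data_rate
 *	Query the current link data rate with MBC_DATA_RATE and cache
 *	the result in ha->link_data_rate (skipped when the firmware
 *	returns 0x7).
 */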
5009 int
5010 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5011 {
5012 int rval;
5013 mbx_cmd_t mc;
5014 mbx_cmd_t *mcp = &mc;
5015 struct qla_hw_data *ha = vha->hw;
5017 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5018 "Entered %s.\n", __func__);
5020 if (!IS_FWI2_CAPABLE(ha))
5021 return QLA_FUNCTION_FAILED;
5023 mcp->mb[0] = MBC_DATA_RATE;
5024 mcp->mb[1] = 0;
5025 mcp->out_mb = MBX_1|MBX_0;
5026 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5027 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5028 mcp->in_mb |= MBX_3;
5029 mcp->tov = MBX_TOV_SECONDS;
5030 mcp->flags = 0;
5031 rval = qla2x00_mailbox_command(vha, mcp);
5032 if (rval != QLA_SUCCESS) {
5033 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5034 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5035 } else {
5036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5037 "Done %s.\n", __func__);
5038 if (mcp->mb[1] != 0x7)
5039 ha->link_data_rate = mcp->mb[1];
5042 return rval;
5046 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5048 int rval;
5049 mbx_cmd_t mc;
5050 mbx_cmd_t *mcp = &mc;
5051 struct qla_hw_data *ha = vha->hw;
5053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5054 "Entered %s.\n", __func__);
5056 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5057 !IS_QLA27XX(ha))
5058 return QLA_FUNCTION_FAILED;
5059 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5060 mcp->out_mb = MBX_0;
5061 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5062 mcp->tov = MBX_TOV_SECONDS;
5063 mcp->flags = 0;
5065 rval = qla2x00_mailbox_command(vha, mcp);
5067 if (rval != QLA_SUCCESS) {
5068 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5069 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5070 } else {
5071 /* Copy all bits to preserve original value */
5072 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5074 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5075 "Done %s.\n", __func__);
5077 return rval;
5081 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5083 int rval;
5084 mbx_cmd_t mc;
5085 mbx_cmd_t *mcp = &mc;
5087 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5088 "Entered %s.\n", __func__);
5090 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5091 /* Copy all bits to preserve original setting */
5092 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5093 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5094 mcp->in_mb = MBX_0;
5095 mcp->tov = MBX_TOV_SECONDS;
5096 mcp->flags = 0;
5097 rval = qla2x00_mailbox_command(vha, mcp);
5099 if (rval != QLA_SUCCESS) {
5100 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5101 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5102 } else
5103 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5104 "Done %s.\n", __func__);
5106 return rval;
5111 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5112 uint16_t *mb)
5114 int rval;
5115 mbx_cmd_t mc;
5116 mbx_cmd_t *mcp = &mc;
5117 struct qla_hw_data *ha = vha->hw;
5119 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5120 "Entered %s.\n", __func__);
5122 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5123 return QLA_FUNCTION_FAILED;
5125 mcp->mb[0] = MBC_PORT_PARAMS;
5126 mcp->mb[1] = loop_id;
5127 if (ha->flags.fcp_prio_enabled)
5128 mcp->mb[2] = BIT_1;
5129 else
5130 mcp->mb[2] = BIT_2;
5131 mcp->mb[4] = priority & 0xf;
5132 mcp->mb[9] = vha->vp_idx;
5133 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5134 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5135 mcp->tov = 30;
5136 mcp->flags = 0;
5137 rval = qla2x00_mailbox_command(vha, mcp);
5138 if (mb != NULL) {
5139 mb[0] = mcp->mb[0];
5140 mb[1] = mcp->mb[1];
5141 mb[3] = mcp->mb[3];
5142 mb[4] = mcp->mb[4];
5145 if (rval != QLA_SUCCESS) {
5146 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5147 } else {
5148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5149 "Done %s.\n", __func__);
5152 return rval;
5153 }
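/*
 * qla2x00_get_thermal_temp
 *	Read the board temperature: via the SFP/I2C path on specific
 *	ISP25xx boards, via the register interface on ISP82xx/ISP8044,
 *	and via the ASIC temperature (GET_RNID_PARAMS) query otherwise.
 */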
5155 int
5156 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5157 {
5158 int rval = QLA_FUNCTION_FAILED;
5159 struct qla_hw_data *ha = vha->hw;
5160 uint8_t byte;
5162 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5163 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5164 "Thermal not supported by this card.\n");
5165 return rval;
5168 if (IS_QLA25XX(ha)) {
5169 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5170 ha->pdev->subsystem_device == 0x0175) {
5171 rval = qla2x00_read_sfp(vha, 0, &byte,
5172 0x98, 0x1, 1, BIT_13|BIT_0);
5173 *temp = byte;
5174 return rval;
5176 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5177 ha->pdev->subsystem_device == 0x338e) {
5178 rval = qla2x00_read_sfp(vha, 0, &byte,
5179 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5180 *temp = byte;
5181 return rval;
5183 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5184 "Thermal not supported by this card.\n");
5185 return rval;
5188 if (IS_QLA82XX(ha)) {
5189 *temp = qla82xx_read_temperature(vha);
5190 rval = QLA_SUCCESS;
5191 return rval;
5192 } else if (IS_QLA8044(ha)) {
5193 *temp = qla8044_read_temperature(vha);
5194 rval = QLA_SUCCESS;
5195 return rval;
5198 rval = qla2x00_read_asic_temperature(vha, temp);
5199 return rval;
5203 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5205 int rval;
5206 struct qla_hw_data *ha = vha->hw;
5207 mbx_cmd_t mc;
5208 mbx_cmd_t *mcp = &mc;
5210 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5211 "Entered %s.\n", __func__);
5213 if (!IS_FWI2_CAPABLE(ha))
5214 return QLA_FUNCTION_FAILED;
5216 memset(mcp, 0, sizeof(mbx_cmd_t));
5217 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5218 mcp->mb[1] = 1;
5220 mcp->out_mb = MBX_1|MBX_0;
5221 mcp->in_mb = MBX_0;
5222 mcp->tov = 30;
5223 mcp->flags = 0;
5225 rval = qla2x00_mailbox_command(vha, mcp);
5226 if (rval != QLA_SUCCESS) {
5227 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5228 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5229 } else {
5230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5231 "Done %s.\n", __func__);
5234 return rval;
5238 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5240 int rval;
5241 struct qla_hw_data *ha = vha->hw;
5242 mbx_cmd_t mc;
5243 mbx_cmd_t *mcp = &mc;
5245 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5246 "Entered %s.\n", __func__);
5248 if (!IS_P3P_TYPE(ha))
5249 return QLA_FUNCTION_FAILED;
5251 memset(mcp, 0, sizeof(mbx_cmd_t));
5252 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5253 mcp->mb[1] = 0;
5255 mcp->out_mb = MBX_1|MBX_0;
5256 mcp->in_mb = MBX_0;
5257 mcp->tov = 30;
5258 mcp->flags = 0;
5260 rval = qla2x00_mailbox_command(vha, mcp);
5261 if (rval != QLA_SUCCESS) {
5262 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5263 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5264 } else {
5265 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5266 "Done %s.\n", __func__);
5269 return rval;
5270 }
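/*
 * qla82xx_md_get_template_size
 *	Ask the firmware for the size of the minidump template and cache
 *	it in ha->md_template_size for the subsequent template fetch.
 */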
5272 int
5273 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5274 {
5275 struct qla_hw_data *ha = vha->hw;
5276 mbx_cmd_t mc;
5277 mbx_cmd_t *mcp = &mc;
5278 int rval = QLA_FUNCTION_FAILED;
5280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5281 "Entered %s.\n", __func__);
5283 memset(mcp->mb, 0, sizeof(mcp->mb));
5284 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5285 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5286 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5287 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5289 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5290 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5291 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5293 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5294 mcp->tov = MBX_TOV_SECONDS;
5295 rval = qla2x00_mailbox_command(vha, mcp);
5297 /* Always copy back return mailbox values. */
5298 if (rval != QLA_SUCCESS) {
5299 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5300 "mailbox command FAILED=0x%x, subcode=%x.\n",
5301 (mcp->mb[1] << 16) | mcp->mb[0],
5302 (mcp->mb[3] << 16) | mcp->mb[2]);
5303 } else {
5304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5305 "Done %s.\n", __func__);
5306 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5307 if (!ha->md_template_size) {
5308 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5309 "Null template size obtained.\n");
5310 rval = QLA_FUNCTION_FAILED;
5313 return rval;
5317 qla82xx_md_get_template(scsi_qla_host_t *vha)
5319 struct qla_hw_data *ha = vha->hw;
5320 mbx_cmd_t mc;
5321 mbx_cmd_t *mcp = &mc;
5322 int rval = QLA_FUNCTION_FAILED;
5324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5325 "Entered %s.\n", __func__);
5327 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5328 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5329 if (!ha->md_tmplt_hdr) {
5330 ql_log(ql_log_warn, vha, 0x1124,
5331 "Unable to allocate memory for Minidump template.\n");
5332 return rval;
5335 memset(mcp->mb, 0, sizeof(mcp->mb));
5336 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5337 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5338 mcp->mb[2] = LSW(RQST_TMPLT);
5339 mcp->mb[3] = MSW(RQST_TMPLT);
5340 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5341 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5342 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5343 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5344 mcp->mb[8] = LSW(ha->md_template_size);
5345 mcp->mb[9] = MSW(ha->md_template_size);
5347 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5348 mcp->tov = MBX_TOV_SECONDS;
5349 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5350 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5351 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5352 rval = qla2x00_mailbox_command(vha, mcp);
5354 if (rval != QLA_SUCCESS) {
5355 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5356 "mailbox command FAILED=0x%x, subcode=%x.\n",
5357 ((mcp->mb[1] << 16) | mcp->mb[0]),
5358 ((mcp->mb[3] << 16) | mcp->mb[2]));
5359 } else
5360 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5361 "Done %s.\n", __func__);
5362 return rval;
5366 qla8044_md_get_template(scsi_qla_host_t *vha)
5368 struct qla_hw_data *ha = vha->hw;
5369 mbx_cmd_t mc;
5370 mbx_cmd_t *mcp = &mc;
5371 int rval = QLA_FUNCTION_FAILED;
5372 int offset = 0, size = MINIDUMP_SIZE_36K;
5373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5374 "Entered %s.\n", __func__);
5376 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5377 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5378 if (!ha->md_tmplt_hdr) {
5379 ql_log(ql_log_warn, vha, 0xb11b,
5380 "Unable to allocate memory for Minidump template.\n");
5381 return rval;
5384 memset(mcp->mb, 0, sizeof(mcp->mb));
5385 while (offset < ha->md_template_size) {
5386 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5387 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5388 mcp->mb[2] = LSW(RQST_TMPLT);
5389 mcp->mb[3] = MSW(RQST_TMPLT);
5390 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5391 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5392 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5393 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5394 mcp->mb[8] = LSW(size);
5395 mcp->mb[9] = MSW(size);
5396 mcp->mb[10] = offset & 0x0000FFFF;
5397 mcp->mb[11] = MSW(offset); /* mb[] is u16; masking with 0xFFFF0000 always truncated to 0 */
5398 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5399 mcp->tov = MBX_TOV_SECONDS;
5400 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5401 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5402 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5403 rval = qla2x00_mailbox_command(vha, mcp);
5405 if (rval != QLA_SUCCESS) {
5406 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5407 "mailbox command FAILED=0x%x, subcode=%x.\n",
5408 ((mcp->mb[1] << 16) | mcp->mb[0]),
5409 ((mcp->mb[3] << 16) | mcp->mb[2]));
5410 return rval;
5411 } else
5412 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5413 "Done %s.\n", __func__);
5414 offset = offset + size;
5416 return rval;
5420 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5422 int rval;
5423 struct qla_hw_data *ha = vha->hw;
5424 mbx_cmd_t mc;
5425 mbx_cmd_t *mcp = &mc;
5427 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5428 return QLA_FUNCTION_FAILED;
5430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5431 "Entered %s.\n", __func__);
5433 memset(mcp, 0, sizeof(mbx_cmd_t));
5434 mcp->mb[0] = MBC_SET_LED_CONFIG;
5435 mcp->mb[1] = led_cfg[0];
5436 mcp->mb[2] = led_cfg[1];
5437 if (IS_QLA8031(ha)) {
5438 mcp->mb[3] = led_cfg[2];
5439 mcp->mb[4] = led_cfg[3];
5440 mcp->mb[5] = led_cfg[4];
5441 mcp->mb[6] = led_cfg[5];
5444 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5445 if (IS_QLA8031(ha))
5446 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5447 mcp->in_mb = MBX_0;
5448 mcp->tov = 30;
5449 mcp->flags = 0;
5451 rval = qla2x00_mailbox_command(vha, mcp);
5452 if (rval != QLA_SUCCESS) {
5453 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5454 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5455 } else {
5456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5457 "Done %s.\n", __func__);
5460 return rval;
5464 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5466 int rval;
5467 struct qla_hw_data *ha = vha->hw;
5468 mbx_cmd_t mc;
5469 mbx_cmd_t *mcp = &mc;
5471 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5472 return QLA_FUNCTION_FAILED;
5474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5475 "Entered %s.\n", __func__);
5477 memset(mcp, 0, sizeof(mbx_cmd_t));
5478 mcp->mb[0] = MBC_GET_LED_CONFIG;
5480 mcp->out_mb = MBX_0;
5481 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5482 if (IS_QLA8031(ha))
5483 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5484 mcp->tov = 30;
5485 mcp->flags = 0;
5487 rval = qla2x00_mailbox_command(vha, mcp);
5488 if (rval != QLA_SUCCESS) {
5489 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5491 } else {
5492 led_cfg[0] = mcp->mb[1];
5493 led_cfg[1] = mcp->mb[2];
5494 if (IS_QLA8031(ha)) {
5495 led_cfg[2] = mcp->mb[3];
5496 led_cfg[3] = mcp->mb[4];
5497 led_cfg[4] = mcp->mb[5];
5498 led_cfg[5] = mcp->mb[6];
5500 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5501 "Done %s.\n", __func__);
5504 return rval;
5508 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5510 int rval;
5511 struct qla_hw_data *ha = vha->hw;
5512 mbx_cmd_t mc;
5513 mbx_cmd_t *mcp = &mc;
5515 if (!IS_P3P_TYPE(ha))
5516 return QLA_FUNCTION_FAILED;
5518 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5519 "Entered %s.\n", __func__);
5521 memset(mcp, 0, sizeof(mbx_cmd_t));
5522 mcp->mb[0] = MBC_SET_LED_CONFIG;
5523 if (enable)
5524 mcp->mb[7] = 0xE;
5525 else
5526 mcp->mb[7] = 0xD;
5528 mcp->out_mb = MBX_7|MBX_0;
5529 mcp->in_mb = MBX_0;
5530 mcp->tov = MBX_TOV_SECONDS;
5531 mcp->flags = 0;
5533 rval = qla2x00_mailbox_command(vha, mcp);
5534 if (rval != QLA_SUCCESS) {
5535 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5536 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5537 } else {
5538 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5539 "Done %s.\n", __func__);
5542 return rval;
5546 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5548 int rval;
5549 struct qla_hw_data *ha = vha->hw;
5550 mbx_cmd_t mc;
5551 mbx_cmd_t *mcp = &mc;
5553 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5554 return QLA_FUNCTION_FAILED;
5556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5557 "Entered %s.\n", __func__);
5559 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5560 mcp->mb[1] = LSW(reg);
5561 mcp->mb[2] = MSW(reg);
5562 mcp->mb[3] = LSW(data);
5563 mcp->mb[4] = MSW(data);
5564 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5566 mcp->in_mb = MBX_1|MBX_0;
5567 mcp->tov = MBX_TOV_SECONDS;
5568 mcp->flags = 0;
5569 rval = qla2x00_mailbox_command(vha, mcp);
5571 if (rval != QLA_SUCCESS) {
5572 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5573 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5574 } else {
5575 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5576 "Done %s.\n", __func__);
5579 return rval;
5583 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5585 int rval;
5586 struct qla_hw_data *ha = vha->hw;
5587 mbx_cmd_t mc;
5588 mbx_cmd_t *mcp = &mc;
5590 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5592 "Implicit LOGO Unsupported.\n");
5593 return QLA_FUNCTION_FAILED;
5597 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5598 "Entering %s.\n", __func__);
5600 /* Perform Implicit LOGO. */
5601 mcp->mb[0] = MBC_PORT_LOGOUT;
5602 mcp->mb[1] = fcport->loop_id;
5603 mcp->mb[10] = BIT_15;
5604 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5605 mcp->in_mb = MBX_0;
5606 mcp->tov = MBX_TOV_SECONDS;
5607 mcp->flags = 0;
5608 rval = qla2x00_mailbox_command(vha, mcp);
5609 if (rval != QLA_SUCCESS)
5610 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5611 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5612 else
5613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5614 "Done %s.\n", __func__);
5616 return rval;
5620 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5622 int rval;
5623 mbx_cmd_t mc;
5624 mbx_cmd_t *mcp = &mc;
5625 struct qla_hw_data *ha = vha->hw;
5626 unsigned long retry_max_time = jiffies + (2 * HZ);
5628 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5629 return QLA_FUNCTION_FAILED;
5631 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5633 retry_rd_reg:
5634 mcp->mb[0] = MBC_READ_REMOTE_REG;
5635 mcp->mb[1] = LSW(reg);
5636 mcp->mb[2] = MSW(reg);
5637 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5638 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5639 mcp->tov = MBX_TOV_SECONDS;
5640 mcp->flags = 0;
5641 rval = qla2x00_mailbox_command(vha, mcp);
5643 if (rval != QLA_SUCCESS) {
5644 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5645 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5646 rval, mcp->mb[0], mcp->mb[1]);
5647 } else {
5648 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5649 if (*data == QLA8XXX_BAD_VALUE) {
5650 /*
5651 * During soft-reset, CAMRAM register reads might
5652 * return 0xbad0bad0, so retry for a maximum of 2 seconds
5653 * while reading CAMRAM registers.
5654 */
5655 if (time_after(jiffies, retry_max_time)) {
5656 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5657 "Failure to read CAMRAM register. "
5658 "data=0x%x.\n", *data);
5659 return QLA_FUNCTION_FAILED;
5661 msleep(100);
5662 goto retry_rd_reg;
5664 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5667 return rval;
5671 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5673 int rval;
5674 mbx_cmd_t mc;
5675 mbx_cmd_t *mcp = &mc;
5676 struct qla_hw_data *ha = vha->hw;
5678 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5679 return QLA_FUNCTION_FAILED;
5681 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5683 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5684 mcp->out_mb = MBX_0;
5685 mcp->in_mb = MBX_1|MBX_0;
5686 mcp->tov = MBX_TOV_SECONDS;
5687 mcp->flags = 0;
5688 rval = qla2x00_mailbox_command(vha, mcp);
5690 if (rval != QLA_SUCCESS) {
5691 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5693 rval, mcp->mb[0], mcp->mb[1]);
5694 ha->isp_ops->fw_dump(vha, 0);
5695 } else {
5696 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5699 return rval;
5703 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5704 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5706 int rval;
5707 mbx_cmd_t mc;
5708 mbx_cmd_t *mcp = &mc;
5709 uint8_t subcode = (uint8_t)options;
5710 struct qla_hw_data *ha = vha->hw;
5712 if (!IS_QLA8031(ha))
5713 return QLA_FUNCTION_FAILED;
5715 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5717 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5718 mcp->mb[1] = options;
5719 mcp->out_mb = MBX_1|MBX_0;
5720 if (subcode & BIT_2) {
5721 mcp->mb[2] = LSW(start_addr);
5722 mcp->mb[3] = MSW(start_addr);
5723 mcp->mb[4] = LSW(end_addr);
5724 mcp->mb[5] = MSW(end_addr);
5725 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5727 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5728 if (!(subcode & (BIT_2 | BIT_5)))
5729 mcp->in_mb |= MBX_4|MBX_3;
5730 mcp->tov = MBX_TOV_SECONDS;
5731 mcp->flags = 0;
5732 rval = qla2x00_mailbox_command(vha, mcp);
5734 if (rval != QLA_SUCCESS) {
5735 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5736 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5737 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5738 mcp->mb[4]);
5739 ha->isp_ops->fw_dump(vha, 0);
5740 } else {
5741 if (subcode & BIT_5)
5742 *sector_size = mcp->mb[1];
5743 else if (subcode & (BIT_6 | BIT_7)) {
5744 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5745 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5746 } else if (subcode & (BIT_3 | BIT_4)) {
5747 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5748 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5750 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5753 return rval;
5757 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5758 uint32_t size)
5760 int rval;
5761 mbx_cmd_t mc;
5762 mbx_cmd_t *mcp = &mc;
5764 if (!IS_MCTP_CAPABLE(vha->hw))
5765 return QLA_FUNCTION_FAILED;
5767 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5768 "Entered %s.\n", __func__);
5770 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5771 mcp->mb[1] = LSW(addr);
5772 mcp->mb[2] = MSW(req_dma);
5773 mcp->mb[3] = LSW(req_dma);
5774 mcp->mb[4] = MSW(size);
5775 mcp->mb[5] = LSW(size);
5776 mcp->mb[6] = MSW(MSD(req_dma));
5777 mcp->mb[7] = LSW(MSD(req_dma));
5778 mcp->mb[8] = MSW(addr);
5779 /* Setting RAM ID to valid */
5780 mcp->mb[10] |= BIT_7;
5781 /* For MCTP RAM ID is 0x40 */
5782 mcp->mb[10] |= 0x40;
5784 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5785 MBX_0;
5787 mcp->in_mb = MBX_0;
5788 mcp->tov = MBX_TOV_SECONDS;
5789 mcp->flags = 0;
5790 rval = qla2x00_mailbox_command(vha, mcp);
5792 if (rval != QLA_SUCCESS) {
5793 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5794 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5795 } else {
5796 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5797 "Done %s.\n", __func__);
5800 return rval;
5801 }
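/*
 * qla26xx_dport_diagnostics
 *	Retrieve D-Port diagnostic results into the caller-supplied
 *	buffer, which is DMA-mapped for the duration of the mailbox
 *	command.
 */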
5803 int
5804 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
5805 void *dd_buf, uint size, uint options)
5806 {
5807 int rval;
5808 mbx_cmd_t mc;
5809 mbx_cmd_t *mcp = &mc;
5810 dma_addr_t dd_dma;
5812 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
5813 return QLA_FUNCTION_FAILED;
5815 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
5816 "Entered %s.\n", __func__);
5818 dd_dma = dma_map_single(&vha->hw->pdev->dev,
5819 dd_buf, size, DMA_FROM_DEVICE);
5820 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
5821 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
5822 return QLA_MEMORY_ALLOC_FAILED;
5825 memset(dd_buf, 0, size);
5827 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
5828 mcp->mb[1] = options;
5829 mcp->mb[2] = MSW(LSD(dd_dma));
5830 mcp->mb[3] = LSW(LSD(dd_dma));
5831 mcp->mb[6] = MSW(MSD(dd_dma));
5832 mcp->mb[7] = LSW(MSD(dd_dma));
5833 mcp->mb[8] = size;
5834 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5835 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5836 mcp->buf_size = size;
5837 mcp->flags = MBX_DMA_IN;
5838 mcp->tov = MBX_TOV_SECONDS * 4;
5839 rval = qla2x00_mailbox_command(vha, mcp);
5841 if (rval != QLA_SUCCESS) {
5842 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
5843 } else {
5844 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
5845 "Done %s.\n", __func__);
5848 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
5849 size, DMA_FROM_DEVICE);
5851 return rval;
5854 static void qla2x00_async_mb_sp_done(void *s, int res)
5856 struct srb *sp = s;
5858 sp->u.iocb_cmd.u.mbx.rc = res;
5860 complete(&sp->u.iocb_cmd.u.mbx.comp);
5861 /* don't free sp here. Let the caller do the free */
5862 }
5864 /*
5865 * This mailbox uses the IOCB interface to send a mailbox command.
5866 * This allows non-critical (non chip-setup) commands to go
5867 * out in parallel.
5868 */
5869 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
5870 {
5871 int rval = QLA_FUNCTION_FAILED;
5872 srb_t *sp;
5873 struct srb_iocb *c;
5875 if (!vha->hw->flags.fw_started)
5876 goto done;
5878 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
5879 if (!sp)
5880 goto done;
5882 sp->type = SRB_MB_IOCB;
5883 sp->name = mb_to_str(mcp->mb[0]);
5885 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
5887 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
5889 c = &sp->u.iocb_cmd;
5890 c->timeout = qla2x00_async_iocb_timeout;
5891 init_completion(&c->u.mbx.comp);
5893 sp->done = qla2x00_async_mb_sp_done;
5895 rval = qla2x00_start_sp(sp);
5896 if (rval != QLA_SUCCESS) {
5897 ql_dbg(ql_dbg_mbx, vha, 0x1018,
5898 "%s: %s Failed submission. %x.\n",
5899 __func__, sp->name, rval);
5900 goto done_free_sp;
5903 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
5904 sp->name, sp->handle);
5906 wait_for_completion(&c->u.mbx.comp);
5907 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
5909 rval = c->u.mbx.rc;
5910 switch (rval) {
5911 case QLA_FUNCTION_TIMEOUT:
5912 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
5913 __func__, sp->name, rval);
5914 break;
5915 case QLA_SUCCESS:
5916 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
5917 __func__, sp->name);
5918 sp->free(sp);
5919 break;
5920 default:
5921 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
5922 __func__, sp->name, rval);
5923 sp->free(sp);
5924 break;
5927 return rval;
5929 done_free_sp:
5930 sp->free(sp);
5931 done:
5932 return rval;
5933 }
5935 /*
5936 * qla24xx_gpdb_wait
5937 * NOTE: Do not call this routine from DPC thread.
5938 */
5939 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
5940 {
5941 int rval = QLA_FUNCTION_FAILED;
5942 dma_addr_t pd_dma;
5943 struct port_database_24xx *pd;
5944 struct qla_hw_data *ha = vha->hw;
5945 mbx_cmd_t mc;
5947 if (!vha->hw->flags.fw_started)
5948 goto done;
5950 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
5951 if (pd == NULL) {
5952 ql_log(ql_log_warn, vha, 0xd047,
5953 "Failed to allocate port database structure.\n");
5954 goto done_free_sp;
5956 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
5958 memset(&mc, 0, sizeof(mc));
5959 mc.mb[0] = MBC_GET_PORT_DATABASE;
5960 mc.mb[1] = cpu_to_le16(fcport->loop_id);
5961 mc.mb[2] = MSW(pd_dma);
5962 mc.mb[3] = LSW(pd_dma);
5963 mc.mb[6] = MSW(MSD(pd_dma));
5964 mc.mb[7] = LSW(MSD(pd_dma));
5965 mc.mb[9] = cpu_to_le16(vha->vp_idx);
5966 mc.mb[10] = cpu_to_le16((uint16_t)opt);
5968 rval = qla24xx_send_mb_cmd(vha, &mc);
5969 if (rval != QLA_SUCCESS) {
5970 ql_dbg(ql_dbg_mbx, vha, 0x1193,
5971 "%s: %8phC fail\n", __func__, fcport->port_name);
5972 goto done_free_sp;
5975 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
5977 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
5978 __func__, fcport->port_name);
5980 done_free_sp:
5981 if (pd)
5982 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
5983 done:
5984 return rval;
5987 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
5988 struct port_database_24xx *pd)
5990 int rval = QLA_SUCCESS;
5991 uint64_t zero = 0;
5992 u8 current_login_state, last_login_state;
5994 if (fcport->fc4f_nvme) {
5995 current_login_state = pd->current_login_state >> 4;
5996 last_login_state = pd->last_login_state >> 4;
5997 } else {
5998 current_login_state = pd->current_login_state & 0xf;
5999 last_login_state = pd->last_login_state & 0xf;
6002 /* Check for logged in state. */
6003 if (current_login_state != PDS_PRLI_COMPLETE &&
6004 last_login_state != PDS_PRLI_COMPLETE) {
6005 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6006 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6007 current_login_state, last_login_state, fcport->loop_id);
6008 rval = QLA_FUNCTION_FAILED;
6009 goto gpd_error_out;
6012 if (fcport->loop_id == FC_NO_LOOP_ID ||
6013 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6014 memcmp(fcport->port_name, pd->port_name, 8))) {
6015 /* We lost the device mid way. */
6016 rval = QLA_NOT_LOGGED_IN;
6017 goto gpd_error_out;
6020 /* Names are little-endian. */
6021 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6022 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6024 /* Get port_id of device. */
6025 fcport->d_id.b.domain = pd->port_id[0];
6026 fcport->d_id.b.area = pd->port_id[1];
6027 fcport->d_id.b.al_pa = pd->port_id[2];
6028 fcport->d_id.b.rsvd_1 = 0;
6030 if (fcport->fc4f_nvme) {
6031 fcport->nvme_prli_service_param =
6032 pd->prli_nvme_svc_param_word_3;
6033 fcport->port_type = FCT_NVME;
6034 } else {
6035 /* If not target must be initiator or unknown type. */
6036 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6037 fcport->port_type = FCT_INITIATOR;
6038 else
6039 fcport->port_type = FCT_TARGET;
6041 /* Passback COS information. */
6042 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6043 FC_COS_CLASS2 : FC_COS_CLASS3;
6045 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6046 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6047 fcport->conf_compl_supported = 1;
6050 gpd_error_out:
6051 return rval;
6052 }
6054 /*
6055 * qla24xx_gidlist_wait
6056 * NOTE: don't call this routine from DPC thread.
6057 */
6058 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6059 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6060 {
6061 int rval = QLA_FUNCTION_FAILED;
6062 mbx_cmd_t mc;
6064 if (!vha->hw->flags.fw_started)
6065 goto done;
6067 memset(&mc, 0, sizeof(mc));
6068 mc.mb[0] = MBC_GET_ID_LIST;
6069 mc.mb[2] = MSW(id_list_dma);
6070 mc.mb[3] = LSW(id_list_dma);
6071 mc.mb[6] = MSW(MSD(id_list_dma));
6072 mc.mb[7] = LSW(MSD(id_list_dma));
6073 mc.mb[8] = 0;
6074 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6076 rval = qla24xx_send_mb_cmd(vha, &mc);
6077 if (rval != QLA_SUCCESS) {
6078 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6079 "%s: fail\n", __func__);
6080 } else {
6081 *entries = mc.mb[1];
6082 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6083 "%s: done\n", __func__);
6085 done:
6086 return rval;