drivers/scsi/qla2xxx/qla_mbx.c (Linux 4.16.11)
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
20 {MBC_GET_RESOURCE_COUNTS, "ResCnt"},
23 static const char *mb_to_str(uint16_t cmd)
25 int i;
26 struct mb_cmd_name *e;
28 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
29 e = mb_str + i;
30 if (cmd == e->cmd)
31 return e->str;
33 return "unknown";
36 static struct rom_cmd {
37 uint16_t cmd;
38 } rom_cmds[] = {
39 { MBC_LOAD_RAM },
40 { MBC_EXECUTE_FIRMWARE },
41 { MBC_READ_RAM_WORD },
42 { MBC_MAILBOX_REGISTER_TEST },
43 { MBC_VERIFY_CHECKSUM },
44 { MBC_GET_FIRMWARE_VERSION },
45 { MBC_LOAD_RISC_RAM },
46 { MBC_DUMP_RISC_RAM },
47 { MBC_LOAD_RISC_RAM_EXTENDED },
48 { MBC_DUMP_RISC_RAM_EXTENDED },
49 { MBC_WRITE_RAM_WORD_EXTENDED },
50 { MBC_READ_RAM_EXTENDED },
51 { MBC_GET_RESOURCE_COUNTS },
52 { MBC_SET_FIRMWARE_OPTION },
53 { MBC_MID_INITIALIZE_FIRMWARE },
54 { MBC_GET_FIRMWARE_STATE },
55 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
56 { MBC_GET_RETRY_COUNT },
57 { MBC_TRACE_CONTROL },
58 { MBC_INITIALIZE_MULTIQ },
59 { MBC_IOCB_COMMAND_A64 },
60 { MBC_GET_ADAPTER_LOOP_ID },
61 { MBC_READ_SFP },
64 static int is_rom_cmd(uint16_t cmd)
66 int i;
67 struct rom_cmd *wc;
69 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
70 wc = rom_cmds + i;
71 if (wc->cmd == cmd)
72 return 1;
75 return 0;
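/*
 * The rom_cmds[] table above lists the "ROM" mailbox commands:
 * is_rom_cmd() is used by qla2x00_mailbox_command() below to let these
 * commands through even while an ISP abort is pending, whereas all other
 * commands are failed with QLA_FUNCTION_TIMEOUT in that state.
 */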
79 * qla2x00_mailbox_command
80 * Issues a mailbox command and waits for completion.
82 * Input:
83 * ha = adapter block pointer.
84 * mcp = driver internal mbx struct pointer.
86 * Output:
87 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
89 * Returns:
90 * 0 : QLA_SUCCESS (command completed successfully)
91 * 1 : QLA_FUNCTION_FAILED (error encountered)
92 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
94 * Context:
95 * Kernel context.
97 static int
98 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
100 int rval, i;
101 unsigned long flags = 0;
102 device_reg_t *reg;
103 uint8_t abort_active;
104 uint8_t io_lock_on;
105 uint16_t command = 0;
106 uint16_t *iptr;
107 uint16_t __iomem *optr;
108 uint32_t cnt;
109 uint32_t mboxes;
110 unsigned long wait_time;
111 struct qla_hw_data *ha = vha->hw;
112 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
115 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
117 if (ha->pdev->error_state > pci_channel_io_frozen) {
118 ql_log(ql_log_warn, vha, 0x1001,
119 "error_state is greater than pci_channel_io_frozen, "
120 "exiting.\n");
121 return QLA_FUNCTION_TIMEOUT;
124 if (vha->device_flags & DFLG_DEV_FAILED) {
125 ql_log(ql_log_warn, vha, 0x1002,
126 "Device in failed state, exiting.\n");
127 return QLA_FUNCTION_TIMEOUT;
130 /* if PCI error, then avoid mbx processing.*/
131 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
132 test_bit(UNLOADING, &base_vha->dpc_flags)) {
133 ql_log(ql_log_warn, vha, 0xd04e,
134 "PCI error, exiting.\n");
135 return QLA_FUNCTION_TIMEOUT;
138 reg = ha->iobase;
139 io_lock_on = base_vha->flags.init_done;
141 rval = QLA_SUCCESS;
142 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
145 if (ha->flags.pci_channel_io_perm_failure) {
146 ql_log(ql_log_warn, vha, 0x1003,
147 "Perm failure on EEH timeout MBX, exiting.\n");
148 return QLA_FUNCTION_TIMEOUT;
151 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
152 /* Setting Link-Down error */
153 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
154 ql_log(ql_log_warn, vha, 0x1004,
155 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
156 return QLA_FUNCTION_TIMEOUT;
159 /* check if ISP abort is active and return cmd with timeout */
160 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
161 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
162 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
163 !is_rom_cmd(mcp->mb[0])) {
164 ql_log(ql_log_info, vha, 0x1005,
165 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
166 mcp->mb[0]);
167 return QLA_FUNCTION_TIMEOUT;
171 * Wait for active mailbox commands to finish by waiting at most tov
172 * seconds. This is to serialize actual issuing of mailbox cmds during
173 * non ISP abort time.
175 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
176 /* Timeout occurred. Return error. */
177 ql_log(ql_log_warn, vha, 0xd035,
178 "Cmd access timeout, cmd=0x%x, Exiting.\n",
179 mcp->mb[0]);
180 return QLA_FUNCTION_TIMEOUT;
183 ha->flags.mbox_busy = 1;
184 /* Save mailbox command for debug */
185 ha->mcp = mcp;
187 ql_dbg(ql_dbg_mbx, vha, 0x1006,
188 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
190 spin_lock_irqsave(&ha->hardware_lock, flags);
192 /* Load mailbox registers. */
193 if (IS_P3P_TYPE(ha))
194 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
195 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
196 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
197 else
198 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
200 iptr = mcp->mb;
201 command = mcp->mb[0];
202 mboxes = mcp->out_mb;
204 ql_dbg(ql_dbg_mbx, vha, 0x1111,
205 "Mailbox registers (OUT):\n");
206 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
207 if (IS_QLA2200(ha) && cnt == 8)
208 optr =
209 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
210 if (mboxes & BIT_0) {
211 ql_dbg(ql_dbg_mbx, vha, 0x1112,
212 "mbox[%d]<-0x%04x\n", cnt, *iptr);
213 WRT_REG_WORD(optr, *iptr);
216 mboxes >>= 1;
217 optr++;
218 iptr++;
221 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
222 "I/O Address = %p.\n", optr);
224 /* Issue set host interrupt command to send cmd out. */
225 ha->flags.mbox_int = 0;
226 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
228 /* Unlock mbx registers and wait for interrupt */
229 ql_dbg(ql_dbg_mbx, vha, 0x100f,
230 "Going to unlock irq & waiting for interrupts. "
231 "jiffies=%lx.\n", jiffies);
233 /* Wait for mbx cmd completion until timeout */
235 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
236 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
238 if (IS_P3P_TYPE(ha)) {
239 if (RD_REG_DWORD(&reg->isp82.hint) &
240 HINT_MBX_INT_PENDING) {
241 spin_unlock_irqrestore(&ha->hardware_lock,
242 flags);
243 ha->flags.mbox_busy = 0;
244 ql_dbg(ql_dbg_mbx, vha, 0x1010,
245 "Pending mailbox timeout, exiting.\n");
246 rval = QLA_FUNCTION_TIMEOUT;
247 goto premature_exit;
249 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
250 } else if (IS_FWI2_CAPABLE(ha))
251 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
252 else
253 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
254 spin_unlock_irqrestore(&ha->hardware_lock, flags);
256 wait_time = jiffies;
257 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
258 mcp->tov * HZ)) {
259 ql_dbg(ql_dbg_mbx, vha, 0x117a,
260 "cmd=%x Timeout.\n", command);
261 spin_lock_irqsave(&ha->hardware_lock, flags);
262 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
263 spin_unlock_irqrestore(&ha->hardware_lock, flags);
265 if (time_after(jiffies, wait_time + 5 * HZ))
266 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
267 command, jiffies_to_msecs(jiffies - wait_time));
268 } else {
269 ql_dbg(ql_dbg_mbx, vha, 0x1011,
270 "Cmd=%x Polling Mode.\n", command);
272 if (IS_P3P_TYPE(ha)) {
273 if (RD_REG_DWORD(&reg->isp82.hint) &
274 HINT_MBX_INT_PENDING) {
275 spin_unlock_irqrestore(&ha->hardware_lock,
276 flags);
277 ha->flags.mbox_busy = 0;
278 ql_dbg(ql_dbg_mbx, vha, 0x1012,
279 "Pending mailbox timeout, exiting.\n");
280 rval = QLA_FUNCTION_TIMEOUT;
281 goto premature_exit;
283 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
284 } else if (IS_FWI2_CAPABLE(ha))
285 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
286 else
287 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
288 spin_unlock_irqrestore(&ha->hardware_lock, flags);
290 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
291 while (!ha->flags.mbox_int) {
292 if (time_after(jiffies, wait_time))
293 break;
295 /* Check for pending interrupts. */
296 qla2x00_poll(ha->rsp_q_map[0]);
298 if (!ha->flags.mbox_int &&
299 !(IS_QLA2200(ha) &&
300 command == MBC_LOAD_RISC_RAM_EXTENDED))
301 msleep(10);
302 } /* while */
303 ql_dbg(ql_dbg_mbx, vha, 0x1013,
304 "Waited %d sec.\n",
305 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
308 /* Check whether we timed out */
309 if (ha->flags.mbox_int) {
310 uint16_t *iptr2;
312 ql_dbg(ql_dbg_mbx, vha, 0x1014,
313 "Cmd=%x completed.\n", command);
315 /* Got interrupt. Clear the flag. */
316 ha->flags.mbox_int = 0;
317 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
319 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
320 ha->flags.mbox_busy = 0;
321 /* Setting Link-Down error */
322 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
323 ha->mcp = NULL;
324 rval = QLA_FUNCTION_FAILED;
325 ql_log(ql_log_warn, vha, 0xd048,
326 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
327 goto premature_exit;
330 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
331 rval = QLA_FUNCTION_FAILED;
333 /* Load return mailbox registers. */
334 iptr2 = mcp->mb;
335 iptr = (uint16_t *)&ha->mailbox_out[0];
336 mboxes = mcp->in_mb;
338 ql_dbg(ql_dbg_mbx, vha, 0x1113,
339 "Mailbox registers (IN):\n");
340 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
341 if (mboxes & BIT_0) {
342 *iptr2 = *iptr;
343 ql_dbg(ql_dbg_mbx, vha, 0x1114,
344 "mbox[%d]->0x%04x\n", cnt, *iptr2);
347 mboxes >>= 1;
348 iptr2++;
349 iptr++;
351 } else {
353 uint16_t mb[8];
354 uint32_t ictrl, host_status, hccr;
355 uint16_t w;
357 if (IS_FWI2_CAPABLE(ha)) {
358 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
359 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
360 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
361 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
362 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
363 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
364 host_status = RD_REG_DWORD(&reg->isp24.host_status);
365 hccr = RD_REG_DWORD(&reg->isp24.hccr);
367 ql_log(ql_log_warn, vha, 0xd04c,
368 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
369 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
370 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
371 mb[7], host_status, hccr);
373 } else {
374 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
375 ictrl = RD_REG_WORD(&reg->isp.ictrl);
376 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
377 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
378 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
380 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
382 /* Capture FW dump only, if PCI device active */
383 if (!pci_channel_offline(vha->hw->pdev)) {
384 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
385 if (w == 0xffff || ictrl == 0xffffffff) {
386 * This is a special case: if the driver is being
387 * unloaded and the PCI device has gone into a bad
388 * state due to a PCI error condition, only the
389 * PCI ERR flag will be set. We do a premature
390 * exit for that case.
392 ha->flags.mbox_busy = 0;
393 rval = QLA_FUNCTION_TIMEOUT;
394 goto premature_exit;
397 /* Attempt to capture firmware dump for further
398 * analysis of the current firmware state. We do not
399 * need to do this if we are intentionally generating
400 * a dump
402 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
403 ha->isp_ops->fw_dump(vha, 0);
404 rval = QLA_FUNCTION_TIMEOUT;
408 ha->flags.mbox_busy = 0;
410 /* Clean up */
411 ha->mcp = NULL;
413 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
414 ql_dbg(ql_dbg_mbx, vha, 0x101a,
415 "Checking for additional resp interrupt.\n");
417 /* polling mode for non isp_abort commands. */
418 qla2x00_poll(ha->rsp_q_map[0]);
421 if (rval == QLA_FUNCTION_TIMEOUT &&
422 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
423 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
424 ha->flags.eeh_busy) {
425 /* not in dpc. schedule it for dpc to take over. */
426 ql_dbg(ql_dbg_mbx, vha, 0x101b,
427 "Timeout, schedule isp_abort_needed.\n");
429 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
430 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
431 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
432 if (IS_QLA82XX(ha)) {
433 ql_dbg(ql_dbg_mbx, vha, 0x112a,
434 "disabling pause transmit on port "
435 "0 & 1.\n");
436 qla82xx_wr_32(ha,
437 QLA82XX_CRB_NIU + 0x98,
438 CRB_NIU_XG_PAUSE_CTL_P0|
439 CRB_NIU_XG_PAUSE_CTL_P1);
441 ql_log(ql_log_info, base_vha, 0x101c,
442 "Mailbox cmd timeout occurred, cmd=0x%x, "
443 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
444 "abort.\n", command, mcp->mb[0],
445 ha->flags.eeh_busy);
446 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
447 qla2xxx_wake_dpc(vha);
449 } else if (!abort_active) {
450 /* call abort directly since we are in the DPC thread */
451 ql_dbg(ql_dbg_mbx, vha, 0x101d,
452 "Timeout, calling abort_isp.\n");
454 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
455 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
456 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
457 if (IS_QLA82XX(ha)) {
458 ql_dbg(ql_dbg_mbx, vha, 0x112b,
459 "disabling pause transmit on port "
460 "0 & 1.\n");
461 qla82xx_wr_32(ha,
462 QLA82XX_CRB_NIU + 0x98,
463 CRB_NIU_XG_PAUSE_CTL_P0|
464 CRB_NIU_XG_PAUSE_CTL_P1);
466 ql_log(ql_log_info, base_vha, 0x101e,
467 "Mailbox cmd timeout occurred, cmd=0x%x, "
468 "mb[0]=0x%x. Scheduling ISP abort ",
469 command, mcp->mb[0]);
470 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
471 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
472 /* Allow next mbx cmd to come in. */
473 complete(&ha->mbx_cmd_comp);
474 if (ha->isp_ops->abort_isp(vha)) {
475 /* Failed. retry later. */
476 set_bit(ISP_ABORT_NEEDED,
477 &vha->dpc_flags);
479 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
480 ql_dbg(ql_dbg_mbx, vha, 0x101f,
481 "Finished abort_isp.\n");
482 goto mbx_done;
487 premature_exit:
488 /* Allow next mbx cmd to come in. */
489 complete(&ha->mbx_cmd_comp);
491 mbx_done:
492 if (rval) {
493 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
494 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
495 dev_name(&ha->pdev->dev), 0x1020+0x800,
496 vha->host_no);
497 mboxes = mcp->in_mb;
498 cnt = 4;
499 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
500 if (mboxes & BIT_0) {
501 printk(" mb[%u]=%x", i, mcp->mb[i]);
502 cnt--;
504 pr_warn(" cmd=%x ****\n", command);
506 ql_dbg(ql_dbg_mbx, vha, 0x1198,
507 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
508 RD_REG_DWORD(&reg->isp24.host_status),
509 RD_REG_DWORD(&reg->isp24.ictrl),
510 RD_REG_DWORD(&reg->isp24.istatus));
511 } else {
512 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
515 return rval;
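/*
 * Typical call pattern for the command wrappers in the rest of this file
 * (illustrative sketch only, mirroring qla2x00_get_firmware_state() below):
 *
 *     mbx_cmd_t mc;
 *     mbx_cmd_t *mcp = &mc;
 *
 *     mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 *     mcp->out_mb = MBX_0;            (registers written to the firmware)
 *     mcp->in_mb = MBX_1|MBX_0;       (registers read back on completion)
 *     mcp->tov = MBX_TOV_SECONDS;
 *     mcp->flags = 0;
 *     rval = qla2x00_mailbox_command(vha, mcp);
 *
 * On return, the registers selected by in_mb have been copied back into
 * mcp->mb[], and rval is QLA_SUCCESS, QLA_FUNCTION_FAILED or
 * QLA_FUNCTION_TIMEOUT as documented above.
 */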
519 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
520 uint32_t risc_code_size)
522 int rval;
523 struct qla_hw_data *ha = vha->hw;
524 mbx_cmd_t mc;
525 mbx_cmd_t *mcp = &mc;
527 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
528 "Entered %s.\n", __func__);
530 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
531 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
532 mcp->mb[8] = MSW(risc_addr);
533 mcp->out_mb = MBX_8|MBX_0;
534 } else {
535 mcp->mb[0] = MBC_LOAD_RISC_RAM;
536 mcp->out_mb = MBX_0;
538 mcp->mb[1] = LSW(risc_addr);
539 mcp->mb[2] = MSW(req_dma);
540 mcp->mb[3] = LSW(req_dma);
541 mcp->mb[6] = MSW(MSD(req_dma));
542 mcp->mb[7] = LSW(MSD(req_dma));
543 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
544 if (IS_FWI2_CAPABLE(ha)) {
545 mcp->mb[4] = MSW(risc_code_size);
546 mcp->mb[5] = LSW(risc_code_size);
547 mcp->out_mb |= MBX_5|MBX_4;
548 } else {
549 mcp->mb[4] = LSW(risc_code_size);
550 mcp->out_mb |= MBX_4;
553 mcp->in_mb = MBX_0;
554 mcp->tov = MBX_TOV_SECONDS;
555 mcp->flags = 0;
556 rval = qla2x00_mailbox_command(vha, mcp);
558 if (rval != QLA_SUCCESS) {
559 ql_dbg(ql_dbg_mbx, vha, 0x1023,
560 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
561 } else {
562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
563 "Done %s.\n", __func__);
566 return rval;
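/*
 * Note on the DMA addressing convention used by qla2x00_load_ram() above
 * and by the other buffer-based commands below: the low 32 bits of the bus
 * address are split across mb[2]/mb[3] (MSW/LSW) and the upper 32 bits
 * across mb[6]/mb[7] (MSW(MSD())/LSW(MSD())).
 */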
569 #define EXTENDED_BB_CREDITS BIT_0
570 #define NVME_ENABLE_FLAG BIT_3
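/* Firmware option bits passed in mailbox 4 of MBC_EXECUTE_FIRMWARE; see qla2x00_execute_fw() below. */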
571 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
573 uint16_t mb4 = BIT_0;
575 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
576 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
578 return mb4;
581 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
583 uint16_t mb4 = BIT_0;
585 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
586 struct nvram_81xx *nv = ha->nvram;
588 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
591 return mb4;
595 * qla2x00_execute_fw
596 * Start adapter firmware.
598 * Input:
599 * ha = adapter block pointer.
600 * TARGET_QUEUE_LOCK must be released.
601 * ADAPTER_STATE_LOCK must be released.
603 * Returns:
604 * qla2x00 local function return status code.
606 * Context:
607 * Kernel context.
610 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
612 int rval;
613 struct qla_hw_data *ha = vha->hw;
614 mbx_cmd_t mc;
615 mbx_cmd_t *mcp = &mc;
617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
618 "Entered %s.\n", __func__);
620 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
621 mcp->out_mb = MBX_0;
622 mcp->in_mb = MBX_0;
623 if (IS_FWI2_CAPABLE(ha)) {
624 mcp->mb[1] = MSW(risc_addr);
625 mcp->mb[2] = LSW(risc_addr);
626 mcp->mb[3] = 0;
627 mcp->mb[4] = 0;
628 ha->flags.using_lr_setting = 0;
629 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
630 IS_QLA27XX(ha)) {
631 if (ql2xautodetectsfp) {
632 if (ha->flags.detected_lr_sfp) {
633 mcp->mb[4] |=
634 qla25xx_set_sfp_lr_dist(ha);
635 ha->flags.using_lr_setting = 1;
637 } else {
638 struct nvram_81xx *nv = ha->nvram;
639 /* set LR distance if specified in nvram */
640 if (nv->enhanced_features &
641 NEF_LR_DIST_ENABLE) {
642 mcp->mb[4] |=
643 qla25xx_set_nvr_lr_dist(ha);
644 ha->flags.using_lr_setting = 1;
649 if (ql2xnvmeenable && IS_QLA27XX(ha))
650 mcp->mb[4] |= NVME_ENABLE_FLAG;
652 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
653 struct nvram_81xx *nv = ha->nvram;
654 /* set minimum speed if specified in nvram */
655 if (nv->min_link_speed >= 2 &&
656 nv->min_link_speed <= 5) {
657 mcp->mb[4] |= BIT_4;
658 mcp->mb[11] = nv->min_link_speed;
659 mcp->out_mb |= MBX_11;
660 mcp->in_mb |= BIT_5;
661 vha->min_link_speed_feat = nv->min_link_speed;
665 if (ha->flags.exlogins_enabled)
666 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
668 if (ha->flags.exchoffld_enabled)
669 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
671 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
672 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
673 } else {
674 mcp->mb[1] = LSW(risc_addr);
675 mcp->out_mb |= MBX_1;
676 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
677 mcp->mb[2] = 0;
678 mcp->out_mb |= MBX_2;
682 mcp->tov = MBX_TOV_SECONDS;
683 mcp->flags = 0;
684 rval = qla2x00_mailbox_command(vha, mcp);
686 if (rval != QLA_SUCCESS) {
687 ql_dbg(ql_dbg_mbx, vha, 0x1026,
688 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
689 } else {
690 if (IS_FWI2_CAPABLE(ha)) {
691 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
692 ql_dbg(ql_dbg_mbx, vha, 0x119a,
693 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
694 ql_dbg(ql_dbg_mbx, vha, 0x1027,
695 "exchanges=%x.\n", mcp->mb[1]);
696 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
697 ha->max_speed_sup = mcp->mb[2] & BIT_0;
698 ql_dbg(ql_dbg_mbx, vha, 0x119b,
699 "Maximum speed supported=%s.\n",
700 ha->max_speed_sup ? "32Gbps" : "16Gbps");
701 if (vha->min_link_speed_feat) {
702 ha->min_link_speed = mcp->mb[5];
703 ql_dbg(ql_dbg_mbx, vha, 0x119c,
704 "Minimum speed set=%s.\n",
705 mcp->mb[5] == 5 ? "32Gbps" :
706 mcp->mb[5] == 4 ? "16Gbps" :
707 mcp->mb[5] == 3 ? "8Gbps" :
708 mcp->mb[5] == 2 ? "4Gbps" :
709 "unknown");
713 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
714 "Done.\n");
717 return rval;
721 * qla_get_exlogin_status
722 * Get extended login status
723 * uses the memory offload control/status Mailbox
725 * Input:
726 * ha: adapter state pointer.
727 * fwopt: firmware options
729 * Returns:
730 * qla2x00 local function status
732 * Context:
733 * Kernel context.
735 #define FETCH_XLOGINS_STAT 0x8
737 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
738 uint16_t *ex_logins_cnt)
740 int rval;
741 mbx_cmd_t mc;
742 mbx_cmd_t *mcp = &mc;
744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
745 "Entered %s\n", __func__);
747 memset(mcp->mb, 0, sizeof(mcp->mb));
748 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
749 mcp->mb[1] = FETCH_XLOGINS_STAT;
750 mcp->out_mb = MBX_1|MBX_0;
751 mcp->in_mb = MBX_10|MBX_4|MBX_0;
752 mcp->tov = MBX_TOV_SECONDS;
753 mcp->flags = 0;
755 rval = qla2x00_mailbox_command(vha, mcp);
756 if (rval != QLA_SUCCESS) {
757 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
758 } else {
759 *buf_sz = mcp->mb[4];
760 *ex_logins_cnt = mcp->mb[10];
762 ql_log(ql_log_info, vha, 0x1190,
763 "buffer size 0x%x, exchange login count=%d\n",
764 mcp->mb[4], mcp->mb[10]);
766 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
767 "Done %s.\n", __func__);
770 return rval;
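/*
 * Illustrative call pattern (the local variable names are examples only):
 *
 *     uint16_t buf_sz, xlogin_cnt;
 *
 *     if (qla_get_exlogin_status(vha, &buf_sz, &xlogin_cnt) == QLA_SUCCESS)
 *             the caller can size its extended-login buffer from the
 *             reported per-login buffer size and login count.
 */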
774 * qla_set_exlogin_mem_cfg
775 * set extended login memory configuration
776 * This mailbox needs to be issued before the init_cb is set
778 * Input:
779 * ha: adapter state pointer.
780 * buffer: buffer pointer
781 * phys_addr: physical address of buffer
782 * size: size of buffer
783 * TARGET_QUEUE_LOCK must be released
784 * ADAPTER_STATE_LOCK must be released
786 * Returns:
787 * qla2x00 local function status code.
789 * Context:
790 * Kernel context.
792 #define CONFIG_XLOGINS_MEM 0x3
794 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
796 int rval;
797 mbx_cmd_t mc;
798 mbx_cmd_t *mcp = &mc;
799 struct qla_hw_data *ha = vha->hw;
801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
802 "Entered %s.\n", __func__);
804 memset(mcp->mb, 0, sizeof(mcp->mb));
805 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
806 mcp->mb[1] = CONFIG_XLOGINS_MEM;
807 mcp->mb[2] = MSW(phys_addr);
808 mcp->mb[3] = LSW(phys_addr);
809 mcp->mb[6] = MSW(MSD(phys_addr));
810 mcp->mb[7] = LSW(MSD(phys_addr));
811 mcp->mb[8] = MSW(ha->exlogin_size);
812 mcp->mb[9] = LSW(ha->exlogin_size);
813 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
814 mcp->in_mb = MBX_11|MBX_0;
815 mcp->tov = MBX_TOV_SECONDS;
816 mcp->flags = 0;
817 rval = qla2x00_mailbox_command(vha, mcp);
818 if (rval != QLA_SUCCESS) {
819 /*EMPTY*/
820 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
821 } else {
822 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
823 "Done %s.\n", __func__);
826 return rval;
830 * qla_get_exchoffld_status
831 * Get exchange offload status
832 * uses the memory offload control/status Mailbox
834 * Input:
835 * ha: adapter state pointer.
836 * fwopt: firmware options
838 * Returns:
839 * qla2x00 local function status
841 * Context:
842 * Kernel context.
844 #define FETCH_XCHOFFLD_STAT 0x2
846 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
847 uint16_t *ex_logins_cnt)
849 int rval;
850 mbx_cmd_t mc;
851 mbx_cmd_t *mcp = &mc;
853 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
854 "Entered %s\n", __func__);
856 memset(mcp->mb, 0, sizeof(mcp->mb));
857 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
858 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
859 mcp->out_mb = MBX_1|MBX_0;
860 mcp->in_mb = MBX_10|MBX_4|MBX_0;
861 mcp->tov = MBX_TOV_SECONDS;
862 mcp->flags = 0;
864 rval = qla2x00_mailbox_command(vha, mcp);
865 if (rval != QLA_SUCCESS) {
866 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
867 } else {
868 *buf_sz = mcp->mb[4];
869 *ex_logins_cnt = mcp->mb[10];
871 ql_log(ql_log_info, vha, 0x118e,
872 "buffer size 0x%x, exchange offload count=%d\n",
873 mcp->mb[4], mcp->mb[10]);
875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
876 "Done %s.\n", __func__);
879 return rval;
883 * qla_set_exchoffld_mem_cfg
884 * Set exchange offload memory configuration
885 * This mailbox needs to be issued before the init_cb is set
887 * Input:
888 * ha: adapter state pointer.
889 * buffer: buffer pointer
890 * phys_addr: physical address of buffer
891 * size: size of buffer
892 * TARGET_QUEUE_LOCK must be released
893 * ADAPTER_STATE_LOCK must be released
895 * Returns:
896 * qla2x00 local function status code.
898 * Context:
899 * Kernel context.
901 #define CONFIG_XCHOFFLD_MEM 0x3
903 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
905 int rval;
906 mbx_cmd_t mc;
907 mbx_cmd_t *mcp = &mc;
908 struct qla_hw_data *ha = vha->hw;
910 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
911 "Entered %s.\n", __func__);
913 memset(mcp->mb, 0, sizeof(mcp->mb));
914 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
915 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
916 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
917 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
918 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
919 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
920 mcp->mb[8] = MSW(ha->exchoffld_size);
921 mcp->mb[9] = LSW(ha->exchoffld_size);
922 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
923 mcp->in_mb = MBX_11|MBX_0;
924 mcp->tov = MBX_TOV_SECONDS;
925 mcp->flags = 0;
926 rval = qla2x00_mailbox_command(vha, mcp);
927 if (rval != QLA_SUCCESS) {
928 /*EMPTY*/
929 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
930 } else {
931 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
932 "Done %s.\n", __func__);
935 return rval;
939 * qla2x00_get_fw_version
940 * Get firmware version.
942 * Input:
943 * ha: adapter state pointer.
944 * major: pointer for major number.
945 * minor: pointer for minor number.
946 * subminor: pointer for subminor number.
948 * Returns:
949 * qla2x00 local function return status code.
951 * Context:
952 * Kernel context.
955 qla2x00_get_fw_version(scsi_qla_host_t *vha)
957 int rval;
958 mbx_cmd_t mc;
959 mbx_cmd_t *mcp = &mc;
960 struct qla_hw_data *ha = vha->hw;
962 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
963 "Entered %s.\n", __func__);
965 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
966 mcp->out_mb = MBX_0;
967 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
968 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
969 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
970 if (IS_FWI2_CAPABLE(ha))
971 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
972 if (IS_QLA27XX(ha))
973 mcp->in_mb |=
974 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
975 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
977 mcp->flags = 0;
978 mcp->tov = MBX_TOV_SECONDS;
979 rval = qla2x00_mailbox_command(vha, mcp);
980 if (rval != QLA_SUCCESS)
981 goto failed;
983 /* Return mailbox data. */
984 ha->fw_major_version = mcp->mb[1];
985 ha->fw_minor_version = mcp->mb[2];
986 ha->fw_subminor_version = mcp->mb[3];
987 ha->fw_attributes = mcp->mb[6];
988 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
989 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
990 else
991 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
993 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
994 ha->mpi_version[0] = mcp->mb[10] & 0xff;
995 ha->mpi_version[1] = mcp->mb[11] >> 8;
996 ha->mpi_version[2] = mcp->mb[11] & 0xff;
997 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
998 ha->phy_version[0] = mcp->mb[8] & 0xff;
999 ha->phy_version[1] = mcp->mb[9] >> 8;
1000 ha->phy_version[2] = mcp->mb[9] & 0xff;
1003 if (IS_FWI2_CAPABLE(ha)) {
1004 ha->fw_attributes_h = mcp->mb[15];
1005 ha->fw_attributes_ext[0] = mcp->mb[16];
1006 ha->fw_attributes_ext[1] = mcp->mb[17];
1007 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1008 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1009 __func__, mcp->mb[15], mcp->mb[6]);
1010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1011 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1012 __func__, mcp->mb[17], mcp->mb[16]);
1014 if (ha->fw_attributes_h & 0x4)
1015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1016 "%s: Firmware supports Extended Login 0x%x\n",
1017 __func__, ha->fw_attributes_h);
1019 if (ha->fw_attributes_h & 0x8)
1020 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1021 "%s: Firmware supports Exchange Offload 0x%x\n",
1022 __func__, ha->fw_attributes_h);
1025 * FW supports nvme and driver load parameter requested nvme.
1026 * BIT 26 of fw_attributes indicates NVMe support.
1028 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
1029 vha->flags.nvme_enabled = 1;
1033 if (IS_QLA27XX(ha)) {
1034 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1035 ha->mpi_version[1] = mcp->mb[11] >> 8;
1036 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1037 ha->pep_version[0] = mcp->mb[13] & 0xff;
1038 ha->pep_version[1] = mcp->mb[14] >> 8;
1039 ha->pep_version[2] = mcp->mb[14] & 0xff;
1040 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1041 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1042 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1043 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1046 failed:
1047 if (rval != QLA_SUCCESS) {
1048 /*EMPTY*/
1049 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1050 } else {
1051 /*EMPTY*/
1052 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1053 "Done %s.\n", __func__);
1055 return rval;
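/*
 * Summary of the fw_attributes_h bits tested above: 0x4 = extended login
 * supported, 0x8 = exchange offload supported, 0x400 = NVMe supported
 * (only honoured when the ql2xnvmeenable module parameter is set).
 */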
1059 * qla2x00_get_fw_options
1060 * Get firmware options.
1062 * Input:
1063 * ha = adapter block pointer.
1064 * fwopt = pointer for firmware options.
1066 * Returns:
1067 * qla2x00 local function return status code.
1069 * Context:
1070 * Kernel context.
1073 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1075 int rval;
1076 mbx_cmd_t mc;
1077 mbx_cmd_t *mcp = &mc;
1079 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1080 "Entered %s.\n", __func__);
1082 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1083 mcp->out_mb = MBX_0;
1084 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1085 mcp->tov = MBX_TOV_SECONDS;
1086 mcp->flags = 0;
1087 rval = qla2x00_mailbox_command(vha, mcp);
1089 if (rval != QLA_SUCCESS) {
1090 /*EMPTY*/
1091 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1092 } else {
1093 fwopts[0] = mcp->mb[0];
1094 fwopts[1] = mcp->mb[1];
1095 fwopts[2] = mcp->mb[2];
1096 fwopts[3] = mcp->mb[3];
1098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1099 "Done %s.\n", __func__);
1102 return rval;
1107 * qla2x00_set_fw_options
1108 * Set firmware options.
1110 * Input:
1111 * ha = adapter block pointer.
1112 * fwopt = pointer for firmware options.
1114 * Returns:
1115 * qla2x00 local function return status code.
1117 * Context:
1118 * Kernel context.
1121 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1123 int rval;
1124 mbx_cmd_t mc;
1125 mbx_cmd_t *mcp = &mc;
1127 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1128 "Entered %s.\n", __func__);
1130 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1131 mcp->mb[1] = fwopts[1];
1132 mcp->mb[2] = fwopts[2];
1133 mcp->mb[3] = fwopts[3];
1134 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1135 mcp->in_mb = MBX_0;
1136 if (IS_FWI2_CAPABLE(vha->hw)) {
1137 mcp->in_mb |= MBX_1;
1138 mcp->mb[10] = fwopts[10];
1139 mcp->out_mb |= MBX_10;
1140 } else {
1141 mcp->mb[10] = fwopts[10];
1142 mcp->mb[11] = fwopts[11];
1143 mcp->mb[12] = 0; /* Undocumented, but used */
1144 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1146 mcp->tov = MBX_TOV_SECONDS;
1147 mcp->flags = 0;
1148 rval = qla2x00_mailbox_command(vha, mcp);
1150 fwopts[0] = mcp->mb[0];
1152 if (rval != QLA_SUCCESS) {
1153 /*EMPTY*/
1154 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1155 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1156 } else {
1157 /*EMPTY*/
1158 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1159 "Done %s.\n", __func__);
1162 return rval;
1166 * qla2x00_mbx_reg_test
1167 * Mailbox register wrap test.
1169 * Input:
1170 * ha = adapter block pointer.
1171 * TARGET_QUEUE_LOCK must be released.
1172 * ADAPTER_STATE_LOCK must be released.
1174 * Returns:
1175 * qla2x00 local function return status code.
1177 * Context:
1178 * Kernel context.
1181 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1183 int rval;
1184 mbx_cmd_t mc;
1185 mbx_cmd_t *mcp = &mc;
1187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1188 "Entered %s.\n", __func__);
1190 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1191 mcp->mb[1] = 0xAAAA;
1192 mcp->mb[2] = 0x5555;
1193 mcp->mb[3] = 0xAA55;
1194 mcp->mb[4] = 0x55AA;
1195 mcp->mb[5] = 0xA5A5;
1196 mcp->mb[6] = 0x5A5A;
1197 mcp->mb[7] = 0x2525;
1198 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1199 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1200 mcp->tov = MBX_TOV_SECONDS;
1201 mcp->flags = 0;
1202 rval = qla2x00_mailbox_command(vha, mcp);
1204 if (rval == QLA_SUCCESS) {
1205 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1206 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1207 rval = QLA_FUNCTION_FAILED;
1208 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1209 mcp->mb[7] != 0x2525)
1210 rval = QLA_FUNCTION_FAILED;
1213 if (rval != QLA_SUCCESS) {
1214 /*EMPTY*/
1215 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1216 } else {
1217 /*EMPTY*/
1218 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1219 "Done %s.\n", __func__);
1222 return rval;
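/*
 * The register wrap test above writes seven fixed patterns through
 * mb[1]..mb[7] and fails if any pattern is not read back unchanged.
 */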
1226 * qla2x00_verify_checksum
1227 * Verify firmware checksum.
1229 * Input:
1230 * ha = adapter block pointer.
1231 * TARGET_QUEUE_LOCK must be released.
1232 * ADAPTER_STATE_LOCK must be released.
1234 * Returns:
1235 * qla2x00 local function return status code.
1237 * Context:
1238 * Kernel context.
1241 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1243 int rval;
1244 mbx_cmd_t mc;
1245 mbx_cmd_t *mcp = &mc;
1247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1248 "Entered %s.\n", __func__);
1250 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1251 mcp->out_mb = MBX_0;
1252 mcp->in_mb = MBX_0;
1253 if (IS_FWI2_CAPABLE(vha->hw)) {
1254 mcp->mb[1] = MSW(risc_addr);
1255 mcp->mb[2] = LSW(risc_addr);
1256 mcp->out_mb |= MBX_2|MBX_1;
1257 mcp->in_mb |= MBX_2|MBX_1;
1258 } else {
1259 mcp->mb[1] = LSW(risc_addr);
1260 mcp->out_mb |= MBX_1;
1261 mcp->in_mb |= MBX_1;
1264 mcp->tov = MBX_TOV_SECONDS;
1265 mcp->flags = 0;
1266 rval = qla2x00_mailbox_command(vha, mcp);
1268 if (rval != QLA_SUCCESS) {
1269 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1270 "Failed=%x chk sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1271 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1272 } else {
1273 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1274 "Done %s.\n", __func__);
1277 return rval;
1281 * qla2x00_issue_iocb
1282 * Issue IOCB using mailbox command
1284 * Input:
1285 * ha = adapter state pointer.
1286 * buffer = buffer pointer.
1287 * phys_addr = physical address of buffer.
1288 * size = size of buffer.
1289 * TARGET_QUEUE_LOCK must be released.
1290 * ADAPTER_STATE_LOCK must be released.
1292 * Returns:
1293 * qla2x00 local function return status code.
1295 * Context:
1296 * Kernel context.
1299 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1300 dma_addr_t phys_addr, size_t size, uint32_t tov)
1302 int rval;
1303 mbx_cmd_t mc;
1304 mbx_cmd_t *mcp = &mc;
1306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1307 "Entered %s.\n", __func__);
1309 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1310 mcp->mb[1] = 0;
1311 mcp->mb[2] = MSW(phys_addr);
1312 mcp->mb[3] = LSW(phys_addr);
1313 mcp->mb[6] = MSW(MSD(phys_addr));
1314 mcp->mb[7] = LSW(MSD(phys_addr));
1315 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1316 mcp->in_mb = MBX_2|MBX_0;
1317 mcp->tov = tov;
1318 mcp->flags = 0;
1319 rval = qla2x00_mailbox_command(vha, mcp);
1321 if (rval != QLA_SUCCESS) {
1322 /*EMPTY*/
1323 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1324 } else {
1325 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1327 /* Mask reserved bits. */
1328 sts_entry->entry_status &=
1329 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1330 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1331 "Done %s.\n", __func__);
1334 return rval;
1338 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1339 size_t size)
1341 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1342 MBX_TOV_SECONDS);
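/*
 * qla2x00_issue_iocb() is a convenience wrapper around
 * qla2x00_issue_iocb_timeout() that uses the default MBX_TOV_SECONDS
 * timeout.
 */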
1346 * qla2x00_abort_command
1347 * Abort command aborts a specified IOCB.
1349 * Input:
1350 * ha = adapter block pointer.
1351 * sp = SRB structure pointer.
1353 * Returns:
1354 * qla2x00 local function return status code.
1356 * Context:
1357 * Kernel context.
1360 qla2x00_abort_command(srb_t *sp)
1362 unsigned long flags = 0;
1363 int rval;
1364 uint32_t handle = 0;
1365 mbx_cmd_t mc;
1366 mbx_cmd_t *mcp = &mc;
1367 fc_port_t *fcport = sp->fcport;
1368 scsi_qla_host_t *vha = fcport->vha;
1369 struct qla_hw_data *ha = vha->hw;
1370 struct req_que *req;
1371 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1373 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1374 "Entered %s.\n", __func__);
1376 if (vha->flags.qpairs_available && sp->qpair)
1377 req = sp->qpair->req;
1378 else
1379 req = vha->req;
1381 spin_lock_irqsave(&ha->hardware_lock, flags);
1382 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1383 if (req->outstanding_cmds[handle] == sp)
1384 break;
1386 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1388 if (handle == req->num_outstanding_cmds) {
1389 /* command not found */
1390 return QLA_FUNCTION_FAILED;
1393 mcp->mb[0] = MBC_ABORT_COMMAND;
1394 if (HAS_EXTENDED_IDS(ha))
1395 mcp->mb[1] = fcport->loop_id;
1396 else
1397 mcp->mb[1] = fcport->loop_id << 8;
1398 mcp->mb[2] = (uint16_t)handle;
1399 mcp->mb[3] = (uint16_t)(handle >> 16);
1400 mcp->mb[6] = (uint16_t)cmd->device->lun;
1401 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1402 mcp->in_mb = MBX_0;
1403 mcp->tov = MBX_TOV_SECONDS;
1404 mcp->flags = 0;
1405 rval = qla2x00_mailbox_command(vha, mcp);
1407 if (rval != QLA_SUCCESS) {
1408 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1409 } else {
1410 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1411 "Done %s.\n", __func__);
1414 return rval;
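/*
 * Callers of qla2x00_abort_command() pass the srb_t of a command that is
 * still listed in the request queue's outstanding_cmds[] table; the routine
 * above derives the firmware handle to abort from that table and returns
 * QLA_FUNCTION_FAILED if the command is no longer outstanding.
 */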
1418 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1420 int rval, rval2;
1421 mbx_cmd_t mc;
1422 mbx_cmd_t *mcp = &mc;
1423 scsi_qla_host_t *vha;
1424 struct req_que *req;
1425 struct rsp_que *rsp;
1427 l = l;
1428 vha = fcport->vha;
1430 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1431 "Entered %s.\n", __func__);
1433 req = vha->hw->req_q_map[0];
1434 rsp = req->rsp;
1435 mcp->mb[0] = MBC_ABORT_TARGET;
1436 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1437 if (HAS_EXTENDED_IDS(vha->hw)) {
1438 mcp->mb[1] = fcport->loop_id;
1439 mcp->mb[10] = 0;
1440 mcp->out_mb |= MBX_10;
1441 } else {
1442 mcp->mb[1] = fcport->loop_id << 8;
1444 mcp->mb[2] = vha->hw->loop_reset_delay;
1445 mcp->mb[9] = vha->vp_idx;
1447 mcp->in_mb = MBX_0;
1448 mcp->tov = MBX_TOV_SECONDS;
1449 mcp->flags = 0;
1450 rval = qla2x00_mailbox_command(vha, mcp);
1451 if (rval != QLA_SUCCESS) {
1452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1453 "Failed=%x.\n", rval);
1456 /* Issue marker IOCB. */
1457 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1458 MK_SYNC_ID);
1459 if (rval2 != QLA_SUCCESS) {
1460 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1461 "Failed to issue marker IOCB (%x).\n", rval2);
1462 } else {
1463 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1464 "Done %s.\n", __func__);
1467 return rval;
1471 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1473 int rval, rval2;
1474 mbx_cmd_t mc;
1475 mbx_cmd_t *mcp = &mc;
1476 scsi_qla_host_t *vha;
1477 struct req_que *req;
1478 struct rsp_que *rsp;
1480 vha = fcport->vha;
1482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1483 "Entered %s.\n", __func__);
1485 req = vha->hw->req_q_map[0];
1486 rsp = req->rsp;
1487 mcp->mb[0] = MBC_LUN_RESET;
1488 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1489 if (HAS_EXTENDED_IDS(vha->hw))
1490 mcp->mb[1] = fcport->loop_id;
1491 else
1492 mcp->mb[1] = fcport->loop_id << 8;
1493 mcp->mb[2] = (u32)l;
1494 mcp->mb[3] = 0;
1495 mcp->mb[9] = vha->vp_idx;
1497 mcp->in_mb = MBX_0;
1498 mcp->tov = MBX_TOV_SECONDS;
1499 mcp->flags = 0;
1500 rval = qla2x00_mailbox_command(vha, mcp);
1501 if (rval != QLA_SUCCESS) {
1502 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1505 /* Issue marker IOCB. */
1506 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1507 MK_SYNC_ID_LUN);
1508 if (rval2 != QLA_SUCCESS) {
1509 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1510 "Failed to issue marker IOCB (%x).\n", rval2);
1511 } else {
1512 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1513 "Done %s.\n", __func__);
1516 return rval;
1520 * qla2x00_get_adapter_id
1521 * Get adapter ID and topology.
1523 * Input:
1524 * ha = adapter block pointer.
1525 * id = pointer for loop ID.
1526 * al_pa = pointer for AL_PA.
1527 * area = pointer for area.
1528 * domain = pointer for domain.
1529 * top = pointer for topology.
1530 * TARGET_QUEUE_LOCK must be released.
1531 * ADAPTER_STATE_LOCK must be released.
1533 * Returns:
1534 * qla2x00 local function return status code.
1536 * Context:
1537 * Kernel context.
1540 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1541 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1543 int rval;
1544 mbx_cmd_t mc;
1545 mbx_cmd_t *mcp = &mc;
1547 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1548 "Entered %s.\n", __func__);
1550 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1551 mcp->mb[9] = vha->vp_idx;
1552 mcp->out_mb = MBX_9|MBX_0;
1553 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1554 if (IS_CNA_CAPABLE(vha->hw))
1555 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1556 if (IS_FWI2_CAPABLE(vha->hw))
1557 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1558 if (IS_QLA27XX(vha->hw))
1559 mcp->in_mb |= MBX_15;
1560 mcp->tov = MBX_TOV_SECONDS;
1561 mcp->flags = 0;
1562 rval = qla2x00_mailbox_command(vha, mcp);
1563 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1564 rval = QLA_COMMAND_ERROR;
1565 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1566 rval = QLA_INVALID_COMMAND;
1568 /* Return data. */
1569 *id = mcp->mb[1];
1570 *al_pa = LSB(mcp->mb[2]);
1571 *area = MSB(mcp->mb[2]);
1572 *domain = LSB(mcp->mb[3]);
1573 *top = mcp->mb[6];
1574 *sw_cap = mcp->mb[7];
1576 if (rval != QLA_SUCCESS) {
1577 /*EMPTY*/
1578 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1579 } else {
1580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1581 "Done %s.\n", __func__);
1583 if (IS_CNA_CAPABLE(vha->hw)) {
1584 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1585 vha->fcoe_fcf_idx = mcp->mb[10];
1586 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1587 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1588 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1589 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1590 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1591 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1593 /* If FA-WWN supported */
1594 if (IS_FAWWN_CAPABLE(vha->hw)) {
1595 if (mcp->mb[7] & BIT_14) {
1596 vha->port_name[0] = MSB(mcp->mb[16]);
1597 vha->port_name[1] = LSB(mcp->mb[16]);
1598 vha->port_name[2] = MSB(mcp->mb[17]);
1599 vha->port_name[3] = LSB(mcp->mb[17]);
1600 vha->port_name[4] = MSB(mcp->mb[18]);
1601 vha->port_name[5] = LSB(mcp->mb[18]);
1602 vha->port_name[6] = MSB(mcp->mb[19]);
1603 vha->port_name[7] = LSB(mcp->mb[19]);
1604 fc_host_port_name(vha->host) =
1605 wwn_to_u64(vha->port_name);
1606 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1607 "FA-WWN acquired %016llx\n",
1608 wwn_to_u64(vha->port_name));
1612 if (IS_QLA27XX(vha->hw))
1613 vha->bbcr = mcp->mb[15];
1616 return rval;
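/*
 * Output decoding used above: mb[1] = loop ID, mb[2] = area/AL_PA,
 * mb[3] = domain, mb[6] = topology, mb[7] = switch capabilities; the FCoE
 * and FA-WWN fields are only populated on CNA/FA-WWN capable adapters.
 */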
1620 * qla2x00_get_retry_cnt
1621 * Get current firmware login retry count and delay.
1623 * Input:
1624 * ha = adapter block pointer.
1625 * retry_cnt = pointer to login retry count.
1626 * tov = pointer to login timeout value.
1628 * Returns:
1629 * qla2x00 local function return status code.
1631 * Context:
1632 * Kernel context.
1635 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1636 uint16_t *r_a_tov)
1638 int rval;
1639 uint16_t ratov;
1640 mbx_cmd_t mc;
1641 mbx_cmd_t *mcp = &mc;
1643 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1644 "Entered %s.\n", __func__);
1646 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1647 mcp->out_mb = MBX_0;
1648 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1649 mcp->tov = MBX_TOV_SECONDS;
1650 mcp->flags = 0;
1651 rval = qla2x00_mailbox_command(vha, mcp);
1653 if (rval != QLA_SUCCESS) {
1654 /*EMPTY*/
1655 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1656 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1657 } else {
1658 /* Convert returned data and check our values. */
1659 *r_a_tov = mcp->mb[3] / 2;
1660 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1661 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1662 /* Update to the larger values */
1663 *retry_cnt = (uint8_t)mcp->mb[1];
1664 *tov = ratov;
1667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1668 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1671 return rval;
1675 * qla2x00_init_firmware
1676 * Initialize adapter firmware.
1678 * Input:
1679 * ha = adapter block pointer.
1680 * dptr = Initialization control block pointer.
1681 * size = size of initialization control block.
1682 * TARGET_QUEUE_LOCK must be released.
1683 * ADAPTER_STATE_LOCK must be released.
1685 * Returns:
1686 * qla2x00 local function return status code.
1688 * Context:
1689 * Kernel context.
1692 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1694 int rval;
1695 mbx_cmd_t mc;
1696 mbx_cmd_t *mcp = &mc;
1697 struct qla_hw_data *ha = vha->hw;
1699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1700 "Entered %s.\n", __func__);
1702 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1703 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1704 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1706 if (ha->flags.npiv_supported)
1707 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1708 else
1709 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1711 mcp->mb[1] = 0;
1712 mcp->mb[2] = MSW(ha->init_cb_dma);
1713 mcp->mb[3] = LSW(ha->init_cb_dma);
1714 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1715 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1716 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1717 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1718 mcp->mb[1] = BIT_0;
1719 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1720 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1721 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1722 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1723 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1724 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1726 /* 1 and 2 should normally be captured. */
1727 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1728 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1729 /* mb3 is additional info about the installed SFP. */
1730 mcp->in_mb |= MBX_3;
1731 mcp->buf_size = size;
1732 mcp->flags = MBX_DMA_OUT;
1733 mcp->tov = MBX_TOV_SECONDS;
1734 rval = qla2x00_mailbox_command(vha, mcp);
1736 if (rval != QLA_SUCCESS) {
1737 /*EMPTY*/
1738 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1739 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1740 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1741 } else {
1742 if (IS_QLA27XX(ha)) {
1743 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1744 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1745 "Invalid SFP/Validation Failed\n");
1747 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1748 "Done %s.\n", __func__);
1751 return rval;
1756 * qla2x00_get_port_database
1757 * Issue normal/enhanced get port database mailbox command
1758 * and copy device name as necessary.
1760 * Input:
1761 * ha = adapter state pointer.
1762 * dev = structure pointer.
1763 * opt = enhanced cmd option byte.
1765 * Returns:
1766 * qla2x00 local function return status code.
1768 * Context:
1769 * Kernel context.
1772 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1774 int rval;
1775 mbx_cmd_t mc;
1776 mbx_cmd_t *mcp = &mc;
1777 port_database_t *pd;
1778 struct port_database_24xx *pd24;
1779 dma_addr_t pd_dma;
1780 struct qla_hw_data *ha = vha->hw;
1782 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1783 "Entered %s.\n", __func__);
1785 pd24 = NULL;
1786 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1787 if (pd == NULL) {
1788 ql_log(ql_log_warn, vha, 0x1050,
1789 "Failed to allocate port database structure.\n");
1790 fcport->query = 0;
1791 return QLA_MEMORY_ALLOC_FAILED;
1794 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1795 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1796 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1797 mcp->mb[2] = MSW(pd_dma);
1798 mcp->mb[3] = LSW(pd_dma);
1799 mcp->mb[6] = MSW(MSD(pd_dma));
1800 mcp->mb[7] = LSW(MSD(pd_dma));
1801 mcp->mb[9] = vha->vp_idx;
1802 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1803 mcp->in_mb = MBX_0;
1804 if (IS_FWI2_CAPABLE(ha)) {
1805 mcp->mb[1] = fcport->loop_id;
1806 mcp->mb[10] = opt;
1807 mcp->out_mb |= MBX_10|MBX_1;
1808 mcp->in_mb |= MBX_1;
1809 } else if (HAS_EXTENDED_IDS(ha)) {
1810 mcp->mb[1] = fcport->loop_id;
1811 mcp->mb[10] = opt;
1812 mcp->out_mb |= MBX_10|MBX_1;
1813 } else {
1814 mcp->mb[1] = fcport->loop_id << 8 | opt;
1815 mcp->out_mb |= MBX_1;
1817 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1818 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1819 mcp->flags = MBX_DMA_IN;
1820 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1821 rval = qla2x00_mailbox_command(vha, mcp);
1822 if (rval != QLA_SUCCESS)
1823 goto gpd_error_out;
1825 if (IS_FWI2_CAPABLE(ha)) {
1826 uint64_t zero = 0;
1827 u8 current_login_state, last_login_state;
1829 pd24 = (struct port_database_24xx *) pd;
1831 /* Check for logged in state. */
1832 if (fcport->fc4f_nvme) {
1833 current_login_state = pd24->current_login_state >> 4;
1834 last_login_state = pd24->last_login_state >> 4;
1835 } else {
1836 current_login_state = pd24->current_login_state & 0xf;
1837 last_login_state = pd24->last_login_state & 0xf;
1839 fcport->current_login_state = pd24->current_login_state;
1840 fcport->last_login_state = pd24->last_login_state;
1842 /* Check for logged in state. */
1843 if (current_login_state != PDS_PRLI_COMPLETE &&
1844 last_login_state != PDS_PRLI_COMPLETE) {
1845 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1846 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1847 current_login_state, last_login_state,
1848 fcport->loop_id);
1849 rval = QLA_FUNCTION_FAILED;
1851 if (!fcport->query)
1852 goto gpd_error_out;
1855 if (fcport->loop_id == FC_NO_LOOP_ID ||
1856 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1857 memcmp(fcport->port_name, pd24->port_name, 8))) {
1858 /* We lost the device mid way. */
1859 rval = QLA_NOT_LOGGED_IN;
1860 goto gpd_error_out;
1863 /* Names are little-endian. */
1864 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1865 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1867 /* Get port_id of device. */
1868 fcport->d_id.b.domain = pd24->port_id[0];
1869 fcport->d_id.b.area = pd24->port_id[1];
1870 fcport->d_id.b.al_pa = pd24->port_id[2];
1871 fcport->d_id.b.rsvd_1 = 0;
1873 /* If not target must be initiator or unknown type. */
1874 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1875 fcport->port_type = FCT_INITIATOR;
1876 else
1877 fcport->port_type = FCT_TARGET;
1879 /* Passback COS information. */
1880 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1881 FC_COS_CLASS2 : FC_COS_CLASS3;
1883 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1884 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1885 } else {
1886 uint64_t zero = 0;
1888 /* Check for logged in state. */
1889 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1890 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1891 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1892 "Unable to verify login-state (%x/%x) - "
1893 "portid=%02x%02x%02x.\n", pd->master_state,
1894 pd->slave_state, fcport->d_id.b.domain,
1895 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1896 rval = QLA_FUNCTION_FAILED;
1897 goto gpd_error_out;
1900 if (fcport->loop_id == FC_NO_LOOP_ID ||
1901 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1902 memcmp(fcport->port_name, pd->port_name, 8))) {
1903 /* We lost the device mid way. */
1904 rval = QLA_NOT_LOGGED_IN;
1905 goto gpd_error_out;
1908 /* Names are little-endian. */
1909 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1910 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1912 /* Get port_id of device. */
1913 fcport->d_id.b.domain = pd->port_id[0];
1914 fcport->d_id.b.area = pd->port_id[3];
1915 fcport->d_id.b.al_pa = pd->port_id[2];
1916 fcport->d_id.b.rsvd_1 = 0;
1918 /* If not target must be initiator or unknown type. */
1919 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1920 fcport->port_type = FCT_INITIATOR;
1921 else
1922 fcport->port_type = FCT_TARGET;
1924 /* Passback COS information. */
1925 fcport->supported_classes = (pd->options & BIT_4) ?
1926 FC_COS_CLASS2: FC_COS_CLASS3;
1929 gpd_error_out:
1930 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1931 fcport->query = 0;
1933 if (rval != QLA_SUCCESS) {
1934 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1935 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1936 mcp->mb[0], mcp->mb[1]);
1937 } else {
1938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1939 "Done %s.\n", __func__);
1942 return rval;
1946 * qla2x00_get_firmware_state
1947 * Get adapter firmware state.
1949 * Input:
1950 * ha = adapter block pointer.
1951 * dptr = pointer for firmware state.
1952 * TARGET_QUEUE_LOCK must be released.
1953 * ADAPTER_STATE_LOCK must be released.
1955 * Returns:
1956 * qla2x00 local function return status code.
1958 * Context:
1959 * Kernel context.
1962 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1964 int rval;
1965 mbx_cmd_t mc;
1966 mbx_cmd_t *mcp = &mc;
1967 struct qla_hw_data *ha = vha->hw;
1969 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1970 "Entered %s.\n", __func__);
1972 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1973 mcp->out_mb = MBX_0;
1974 if (IS_FWI2_CAPABLE(vha->hw))
1975 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1976 else
1977 mcp->in_mb = MBX_1|MBX_0;
1978 mcp->tov = MBX_TOV_SECONDS;
1979 mcp->flags = 0;
1980 rval = qla2x00_mailbox_command(vha, mcp);
1982 /* Return firmware states. */
1983 states[0] = mcp->mb[1];
1984 if (IS_FWI2_CAPABLE(vha->hw)) {
1985 states[1] = mcp->mb[2];
1986 states[2] = mcp->mb[3]; /* SFP info */
1987 states[3] = mcp->mb[4];
1988 states[4] = mcp->mb[5];
1989 states[5] = mcp->mb[6]; /* DPORT status */
1992 if (rval != QLA_SUCCESS) {
1993 /*EMPTY*/
1994 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1995 } else {
1996 if (IS_QLA27XX(ha)) {
1997 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1998 ql_dbg(ql_dbg_mbx, vha, 0x119e,
1999 "Invalid SFP/Validation Failed\n");
2001 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2002 "Done %s.\n", __func__);
2005 return rval;
2009 * qla2x00_get_port_name
2010 * Issue get port name mailbox command.
2011 * Returned name is in big endian format.
2013 * Input:
2014 * ha = adapter block pointer.
2015 * loop_id = loop ID of device.
2016 * name = pointer for name.
2017 * TARGET_QUEUE_LOCK must be released.
2018 * ADAPTER_STATE_LOCK must be released.
2020 * Returns:
2021 * qla2x00 local function return status code.
2023 * Context:
2024 * Kernel context.
2027 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2028 uint8_t opt)
2030 int rval;
2031 mbx_cmd_t mc;
2032 mbx_cmd_t *mcp = &mc;
2034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2035 "Entered %s.\n", __func__);
2037 mcp->mb[0] = MBC_GET_PORT_NAME;
2038 mcp->mb[9] = vha->vp_idx;
2039 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2040 if (HAS_EXTENDED_IDS(vha->hw)) {
2041 mcp->mb[1] = loop_id;
2042 mcp->mb[10] = opt;
2043 mcp->out_mb |= MBX_10;
2044 } else {
2045 mcp->mb[1] = loop_id << 8 | opt;
2048 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2049 mcp->tov = MBX_TOV_SECONDS;
2050 mcp->flags = 0;
2051 rval = qla2x00_mailbox_command(vha, mcp);
2053 if (rval != QLA_SUCCESS) {
2054 /*EMPTY*/
2055 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2056 } else {
2057 if (name != NULL) {
2058 /* This function returns name in big endian. */
2059 name[0] = MSB(mcp->mb[2]);
2060 name[1] = LSB(mcp->mb[2]);
2061 name[2] = MSB(mcp->mb[3]);
2062 name[3] = LSB(mcp->mb[3]);
2063 name[4] = MSB(mcp->mb[6]);
2064 name[5] = LSB(mcp->mb[6]);
2065 name[6] = MSB(mcp->mb[7]);
2066 name[7] = LSB(mcp->mb[7]);
2069 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2070 "Done %s.\n", __func__);
2073 return rval;
2077 * qla24xx_link_initialize
2078 * Issue link initialization mailbox command.
2080 * Input:
2081 * ha = adapter block pointer.
2082 * TARGET_QUEUE_LOCK must be released.
2083 * ADAPTER_STATE_LOCK must be released.
2085 * Returns:
2086 * qla2x00 local function return status code.
2088 * Context:
2089 * Kernel context.
2092 qla24xx_link_initialize(scsi_qla_host_t *vha)
2094 int rval;
2095 mbx_cmd_t mc;
2096 mbx_cmd_t *mcp = &mc;
2098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2099 "Entered %s.\n", __func__);
2101 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2102 return QLA_FUNCTION_FAILED;
2104 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2105 mcp->mb[1] = BIT_4;
2106 if (vha->hw->operating_mode == LOOP)
2107 mcp->mb[1] |= BIT_6;
2108 else
2109 mcp->mb[1] |= BIT_5;
2110 mcp->mb[2] = 0;
2111 mcp->mb[3] = 0;
2112 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2113 mcp->in_mb = MBX_0;
2114 mcp->tov = MBX_TOV_SECONDS;
2115 mcp->flags = 0;
2116 rval = qla2x00_mailbox_command(vha, mcp);
2118 if (rval != QLA_SUCCESS) {
2119 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2120 } else {
2121 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2122 "Done %s.\n", __func__);
2125 return rval;
2129 * qla2x00_lip_reset
2130 * Issue LIP reset mailbox command.
2132 * Input:
2133 * ha = adapter block pointer.
2134 * TARGET_QUEUE_LOCK must be released.
2135 * ADAPTER_STATE_LOCK must be released.
2137 * Returns:
2138 * qla2x00 local function return status code.
2140 * Context:
2141 * Kernel context.
2144 qla2x00_lip_reset(scsi_qla_host_t *vha)
2146 int rval;
2147 mbx_cmd_t mc;
2148 mbx_cmd_t *mcp = &mc;
2150 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2151 "Entered %s.\n", __func__);
2153 if (IS_CNA_CAPABLE(vha->hw)) {
2154 /* Logout across all FCFs. */
2155 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2156 mcp->mb[1] = BIT_1;
2157 mcp->mb[2] = 0;
2158 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2159 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2160 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2161 mcp->mb[1] = BIT_6;
2162 mcp->mb[2] = 0;
2163 mcp->mb[3] = vha->hw->loop_reset_delay;
2164 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2165 } else {
2166 mcp->mb[0] = MBC_LIP_RESET;
2167 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2168 if (HAS_EXTENDED_IDS(vha->hw)) {
2169 mcp->mb[1] = 0x00ff;
2170 mcp->mb[10] = 0;
2171 mcp->out_mb |= MBX_10;
2172 } else {
2173 mcp->mb[1] = 0xff00;
2175 mcp->mb[2] = vha->hw->loop_reset_delay;
2176 mcp->mb[3] = 0;
2178 mcp->in_mb = MBX_0;
2179 mcp->tov = MBX_TOV_SECONDS;
2180 mcp->flags = 0;
2181 rval = qla2x00_mailbox_command(vha, mcp);
2183 if (rval != QLA_SUCCESS) {
2184 /*EMPTY*/
2185 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2186 } else {
2187 /*EMPTY*/
2188 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2189 "Done %s.\n", __func__);
2192 return rval;
2196 * qla2x00_send_sns
2197 * Send SNS command.
2199 * Input:
2200 * ha = adapter block pointer.
2201 * sns = pointer for command.
2202 * cmd_size = command size.
2203 * buf_size = response/command size.
2204 * TARGET_QUEUE_LOCK must be released.
2205 * ADAPTER_STATE_LOCK must be released.
2207 * Returns:
2208 * qla2x00 local function return status code.
2210 * Context:
2211 * Kernel context.
2214 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2215 uint16_t cmd_size, size_t buf_size)
2217 int rval;
2218 mbx_cmd_t mc;
2219 mbx_cmd_t *mcp = &mc;
2221 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2222 "Entered %s.\n", __func__);
2224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2225 "Retry cnt=%d ratov=%d total tov=%d.\n",
2226 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2228 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2229 mcp->mb[1] = cmd_size;
2230 mcp->mb[2] = MSW(sns_phys_address);
2231 mcp->mb[3] = LSW(sns_phys_address);
2232 mcp->mb[6] = MSW(MSD(sns_phys_address));
2233 mcp->mb[7] = LSW(MSD(sns_phys_address));
2234 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2235 mcp->in_mb = MBX_0|MBX_1;
2236 mcp->buf_size = buf_size;
2237 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2238 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2239 rval = qla2x00_mailbox_command(vha, mcp);
2241 if (rval != QLA_SUCCESS) {
2242 /*EMPTY*/
2243 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2244 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2245 rval, mcp->mb[0], mcp->mb[1]);
2246 } else {
2247 /*EMPTY*/
2248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2249 "Done %s.\n", __func__);
2252 return rval;
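/*
 * Standalone sketch of the address packing used above: the 64-bit SNS
 * buffer address is spread across four 16-bit mailbox registers.  The
 * EX_* macros are local stand-ins for the driver's LSW/MSW/LSD/MSD
 * helpers; illustration only.
 */
#include <stdint.h>

#define EX_LSW(x)	((uint16_t)((x) & 0xffff))
#define EX_MSW(x)	((uint16_t)(((uint32_t)(x)) >> 16))
#define EX_LSD(x)	((uint32_t)((x) & 0xffffffffULL))
#define EX_MSD(x)	((uint32_t)(((uint64_t)(x)) >> 32))

static void sns_addr_to_mb(uint64_t dma, uint16_t mb[8])
{
	mb[2] = EX_MSW(EX_LSD(dma));	/* low dword, high word  */
	mb[3] = EX_LSW(EX_LSD(dma));	/* low dword, low word   */
	mb[6] = EX_MSW(EX_MSD(dma));	/* high dword, high word */
	mb[7] = EX_LSW(EX_MSD(dma));	/* high dword, low word  */
}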
2256 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2257 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2259 int rval;
2261 struct logio_entry_24xx *lg;
2262 dma_addr_t lg_dma;
2263 uint32_t iop[2];
2264 struct qla_hw_data *ha = vha->hw;
2265 struct req_que *req;
2267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2268 "Entered %s.\n", __func__);
2270 if (vha->vp_idx && vha->qpair)
2271 req = vha->qpair->req;
2272 else
2273 req = ha->req_q_map[0];
2275 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2276 if (lg == NULL) {
2277 ql_log(ql_log_warn, vha, 0x1062,
2278 "Failed to allocate login IOCB.\n");
2279 return QLA_MEMORY_ALLOC_FAILED;
2282 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2283 lg->entry_count = 1;
2284 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2285 lg->nport_handle = cpu_to_le16(loop_id);
2286 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2287 if (opt & BIT_0)
2288 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2289 if (opt & BIT_1)
2290 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2291 lg->port_id[0] = al_pa;
2292 lg->port_id[1] = area;
2293 lg->port_id[2] = domain;
2294 lg->vp_index = vha->vp_idx;
2295 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2296 (ha->r_a_tov / 10 * 2) + 2);
2297 if (rval != QLA_SUCCESS) {
2298 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2299 "Failed to issue login IOCB (%x).\n", rval);
2300 } else if (lg->entry_status != 0) {
2301 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2302 "Failed to complete IOCB -- error status (%x).\n",
2303 lg->entry_status);
2304 rval = QLA_FUNCTION_FAILED;
2305 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2306 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2307 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2309 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2310 "Failed to complete IOCB -- completion status (%x) "
2311 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2312 iop[0], iop[1]);
2314 switch (iop[0]) {
2315 case LSC_SCODE_PORTID_USED:
2316 mb[0] = MBS_PORT_ID_USED;
2317 mb[1] = LSW(iop[1]);
2318 break;
2319 case LSC_SCODE_NPORT_USED:
2320 mb[0] = MBS_LOOP_ID_USED;
2321 break;
2322 case LSC_SCODE_NOLINK:
2323 case LSC_SCODE_NOIOCB:
2324 case LSC_SCODE_NOXCB:
2325 case LSC_SCODE_CMD_FAILED:
2326 case LSC_SCODE_NOFABRIC:
2327 case LSC_SCODE_FW_NOT_READY:
2328 case LSC_SCODE_NOT_LOGGED_IN:
2329 case LSC_SCODE_NOPCB:
2330 case LSC_SCODE_ELS_REJECT:
2331 case LSC_SCODE_CMD_PARAM_ERR:
2332 case LSC_SCODE_NONPORT:
2333 case LSC_SCODE_LOGGED_IN:
2334 case LSC_SCODE_NOFLOGI_ACC:
2335 default:
2336 mb[0] = MBS_COMMAND_ERROR;
2337 break;
2339 } else {
2340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2341 "Done %s.\n", __func__);
2343 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2345 mb[0] = MBS_COMMAND_COMPLETE;
2346 mb[1] = 0;
2347 if (iop[0] & BIT_4) {
2348 if (iop[0] & BIT_8)
2349 mb[1] |= BIT_1;
2350 } else
2351 mb[1] = BIT_0;
2353 /* Passback COS information. */
2354 mb[10] = 0;
2355 if (lg->io_parameter[7] || lg->io_parameter[8])
2356 mb[10] |= BIT_0; /* Class 2. */
2357 if (lg->io_parameter[9] || lg->io_parameter[10])
2358 mb[10] |= BIT_1; /* Class 3. */
2359 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2360 mb[10] |= BIT_7; /* Confirmed Completion
2361 * Allowed */
2365 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2367 return rval;
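/*
 * Standalone sketch of the class-of-service passback above (endianness
 * ignored for clarity; the driver tests the raw little-endian words).
 * The EX_BIT_* values stand in for the driver's BIT_* macros.
 */
#include <stdint.h>

#define EX_BIT_0	0x0001u
#define EX_BIT_1	0x0002u
#define EX_BIT_7	0x0080u

static uint16_t cos_from_io_params(const uint32_t iop[11])
{
	uint16_t cos = 0;

	if (iop[7] || iop[8])
		cos |= EX_BIT_0;	/* Class 2 service parameters */
	if (iop[9] || iop[10])
		cos |= EX_BIT_1;	/* Class 3 service parameters */
	if (iop[0] & EX_BIT_7)
		cos |= EX_BIT_7;	/* Confirmed Completion Allowed */
	return cos;
}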
2371 * qla2x00_login_fabric
2372 * Issue login fabric port mailbox command.
2374 * Input:
2375 * ha = adapter block pointer.
2376 * loop_id = device loop ID.
2377 * domain = device domain.
2378 * area = device area.
2379 * al_pa = device AL_PA.
2380 * status = pointer for return status.
2381 * opt = command options.
2382 * TARGET_QUEUE_LOCK must be released.
2383 * ADAPTER_STATE_LOCK must be released.
2385 * Returns:
2386 * qla2x00 local function return status code.
2388 * Context:
2389 * Kernel context.
2392 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2393 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2395 int rval;
2396 mbx_cmd_t mc;
2397 mbx_cmd_t *mcp = &mc;
2398 struct qla_hw_data *ha = vha->hw;
2400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2401 "Entered %s.\n", __func__);
2403 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2404 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2405 if (HAS_EXTENDED_IDS(ha)) {
2406 mcp->mb[1] = loop_id;
2407 mcp->mb[10] = opt;
2408 mcp->out_mb |= MBX_10;
2409 } else {
2410 mcp->mb[1] = (loop_id << 8) | opt;
2412 mcp->mb[2] = domain;
2413 mcp->mb[3] = area << 8 | al_pa;
2415 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2416 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2417 mcp->flags = 0;
2418 rval = qla2x00_mailbox_command(vha, mcp);
2420 /* Return mailbox statuses. */
2421 if (mb != NULL) {
2422 mb[0] = mcp->mb[0];
2423 mb[1] = mcp->mb[1];
2424 mb[2] = mcp->mb[2];
2425 mb[6] = mcp->mb[6];
2426 mb[7] = mcp->mb[7];
2427 /* COS retrieved from Get-Port-Database mailbox command. */
2428 mb[10] = 0;
2431 if (rval != QLA_SUCCESS) {
2432 /* RLU tmp code: need to change the main mailbox_command function to
2433 * return ok even when the mailbox completion value is not
2434 * SUCCESS. The caller is then responsible for interpreting
2435 * the return values of this mailbox command if we're not
2436 * to change too much of the existing code.
2438 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2439 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2440 mcp->mb[0] == 0x4006)
2441 rval = QLA_SUCCESS;
2443 /*EMPTY*/
2444 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2445 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2446 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2447 } else {
2448 /*EMPTY*/
2449 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2450 "Done %s.\n", __func__);
2453 return rval;
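/*
 * Standalone sketch of the loop ID/option packing above: without
 * extended IDs the 8-bit loop ID shares mailbox register 1 with the
 * option byte, otherwise the options move to mb[10].  Illustrative
 * helper only; not driver code.
 */
#include <stdint.h>

static uint16_t pack_loopid_opt(uint16_t loop_id, uint8_t opt,
				int has_extended_ids, uint16_t *mb10)
{
	if (has_extended_ids) {
		*mb10 = opt;			/* options in mb[10] */
		return loop_id;			/* full ID in mb[1]  */
	}
	*mb10 = 0;
	return (uint16_t)(loop_id << 8 | opt);	/* packed into mb[1] */
}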
2457 * qla2x00_login_local_device
2458 * Issue login loop port mailbox command.
2460 * Input:
2461 * ha = adapter block pointer.
2462 * loop_id = device loop ID.
2463 * opt = command options.
2465 * Returns:
2466 * Return status code.
2468 * Context:
2469 * Kernel context.
2473 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2474 uint16_t *mb_ret, uint8_t opt)
2476 int rval;
2477 mbx_cmd_t mc;
2478 mbx_cmd_t *mcp = &mc;
2479 struct qla_hw_data *ha = vha->hw;
2481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2482 "Entered %s.\n", __func__);
2484 if (IS_FWI2_CAPABLE(ha))
2485 return qla24xx_login_fabric(vha, fcport->loop_id,
2486 fcport->d_id.b.domain, fcport->d_id.b.area,
2487 fcport->d_id.b.al_pa, mb_ret, opt);
2489 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2490 if (HAS_EXTENDED_IDS(ha))
2491 mcp->mb[1] = fcport->loop_id;
2492 else
2493 mcp->mb[1] = fcport->loop_id << 8;
2494 mcp->mb[2] = opt;
2495 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2496 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2497 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2498 mcp->flags = 0;
2499 rval = qla2x00_mailbox_command(vha, mcp);
2501 /* Return mailbox statuses. */
2502 if (mb_ret != NULL) {
2503 mb_ret[0] = mcp->mb[0];
2504 mb_ret[1] = mcp->mb[1];
2505 mb_ret[6] = mcp->mb[6];
2506 mb_ret[7] = mcp->mb[7];
2509 if (rval != QLA_SUCCESS) {
2510 /* AV tmp code: need to change the main mailbox_command function to
2511 * return ok even when the mailbox completion value is not
2512 * SUCCESS. The caller is then responsible for interpreting
2513 * the return values of this mailbox command if we're not
2514 * to change too much of the existing code.
2516 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2517 rval = QLA_SUCCESS;
2519 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2520 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2521 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2522 } else {
2523 /*EMPTY*/
2524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2525 "Done %s.\n", __func__);
2528 return (rval);
2532 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2533 uint8_t area, uint8_t al_pa)
2535 int rval;
2536 struct logio_entry_24xx *lg;
2537 dma_addr_t lg_dma;
2538 struct qla_hw_data *ha = vha->hw;
2539 struct req_que *req;
2541 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2542 "Entered %s.\n", __func__);
2544 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2545 if (lg == NULL) {
2546 ql_log(ql_log_warn, vha, 0x106e,
2547 "Failed to allocate logout IOCB.\n");
2548 return QLA_MEMORY_ALLOC_FAILED;
2551 req = vha->req;
2552 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2553 lg->entry_count = 1;
2554 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2555 lg->nport_handle = cpu_to_le16(loop_id);
2556 lg->control_flags =
2557 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2558 LCF_FREE_NPORT);
2559 lg->port_id[0] = al_pa;
2560 lg->port_id[1] = area;
2561 lg->port_id[2] = domain;
2562 lg->vp_index = vha->vp_idx;
2563 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2564 (ha->r_a_tov / 10 * 2) + 2);
2565 if (rval != QLA_SUCCESS) {
2566 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2567 "Failed to issue logout IOCB (%x).\n", rval);
2568 } else if (lg->entry_status != 0) {
2569 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2570 "Failed to complete IOCB -- error status (%x).\n",
2571 lg->entry_status);
2572 rval = QLA_FUNCTION_FAILED;
2573 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2574 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2575 "Failed to complete IOCB -- completion status (%x) "
2576 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2577 le32_to_cpu(lg->io_parameter[0]),
2578 le32_to_cpu(lg->io_parameter[1]));
2579 } else {
2580 /*EMPTY*/
2581 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2582 "Done %s.\n", __func__);
2585 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2587 return rval;
2591 * qla2x00_fabric_logout
2592 * Issue logout fabric port mailbox command.
2594 * Input:
2595 * ha = adapter block pointer.
2596 * loop_id = device loop ID.
2597 * TARGET_QUEUE_LOCK must be released.
2598 * ADAPTER_STATE_LOCK must be released.
2600 * Returns:
2601 * qla2x00 local function return status code.
2603 * Context:
2604 * Kernel context.
2607 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2608 uint8_t area, uint8_t al_pa)
2610 int rval;
2611 mbx_cmd_t mc;
2612 mbx_cmd_t *mcp = &mc;
2614 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2615 "Entered %s.\n", __func__);
2617 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2618 mcp->out_mb = MBX_1|MBX_0;
2619 if (HAS_EXTENDED_IDS(vha->hw)) {
2620 mcp->mb[1] = loop_id;
2621 mcp->mb[10] = 0;
2622 mcp->out_mb |= MBX_10;
2623 } else {
2624 mcp->mb[1] = loop_id << 8;
2627 mcp->in_mb = MBX_1|MBX_0;
2628 mcp->tov = MBX_TOV_SECONDS;
2629 mcp->flags = 0;
2630 rval = qla2x00_mailbox_command(vha, mcp);
2632 if (rval != QLA_SUCCESS) {
2633 /*EMPTY*/
2634 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2635 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2636 } else {
2637 /*EMPTY*/
2638 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2639 "Done %s.\n", __func__);
2642 return rval;
2646 * qla2x00_full_login_lip
2647 * Issue full login LIP mailbox command.
2649 * Input:
2650 * ha = adapter block pointer.
2651 * TARGET_QUEUE_LOCK must be released.
2652 * ADAPTER_STATE_LOCK must be released.
2654 * Returns:
2655 * qla2x00 local function return status code.
2657 * Context:
2658 * Kernel context.
2661 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2663 int rval;
2664 mbx_cmd_t mc;
2665 mbx_cmd_t *mcp = &mc;
2667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2668 "Entered %s.\n", __func__);
2670 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2671 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2672 mcp->mb[2] = 0;
2673 mcp->mb[3] = 0;
2674 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2675 mcp->in_mb = MBX_0;
2676 mcp->tov = MBX_TOV_SECONDS;
2677 mcp->flags = 0;
2678 rval = qla2x00_mailbox_command(vha, mcp);
2680 if (rval != QLA_SUCCESS) {
2681 /*EMPTY*/
2682 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2683 } else {
2684 /*EMPTY*/
2685 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2686 "Done %s.\n", __func__);
2689 return rval;
2693 * qla2x00_get_id_list
2695 * Input:
2696 * ha = adapter block pointer.
2698 * Returns:
2699 * qla2x00 local function return status code.
2701 * Context:
2702 * Kernel context.
2705 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2706 uint16_t *entries)
2708 int rval;
2709 mbx_cmd_t mc;
2710 mbx_cmd_t *mcp = &mc;
2712 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2713 "Entered %s.\n", __func__);
2715 if (id_list == NULL)
2716 return QLA_FUNCTION_FAILED;
2718 mcp->mb[0] = MBC_GET_ID_LIST;
2719 mcp->out_mb = MBX_0;
2720 if (IS_FWI2_CAPABLE(vha->hw)) {
2721 mcp->mb[2] = MSW(id_list_dma);
2722 mcp->mb[3] = LSW(id_list_dma);
2723 mcp->mb[6] = MSW(MSD(id_list_dma));
2724 mcp->mb[7] = LSW(MSD(id_list_dma));
2725 mcp->mb[8] = 0;
2726 mcp->mb[9] = vha->vp_idx;
2727 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2728 } else {
2729 mcp->mb[1] = MSW(id_list_dma);
2730 mcp->mb[2] = LSW(id_list_dma);
2731 mcp->mb[3] = MSW(MSD(id_list_dma));
2732 mcp->mb[6] = LSW(MSD(id_list_dma));
2733 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2735 mcp->in_mb = MBX_1|MBX_0;
2736 mcp->tov = MBX_TOV_SECONDS;
2737 mcp->flags = 0;
2738 rval = qla2x00_mailbox_command(vha, mcp);
2740 if (rval != QLA_SUCCESS) {
2741 /*EMPTY*/
2742 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2743 } else {
2744 *entries = mcp->mb[1];
2745 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2746 "Done %s.\n", __func__);
2749 return rval;
2753 * qla2x00_get_resource_cnts
2754 * Get current firmware resource counts.
2756 * Input:
2757 * ha = adapter block pointer.
2759 * Returns:
2760 * qla2x00 local function return status code.
2762 * Context:
2763 * Kernel context.
2766 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2768 struct qla_hw_data *ha = vha->hw;
2769 int rval;
2770 mbx_cmd_t mc;
2771 mbx_cmd_t *mcp = &mc;
2773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2774 "Entered %s.\n", __func__);
2776 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2777 mcp->out_mb = MBX_0;
2778 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2779 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2780 mcp->in_mb |= MBX_12;
2781 mcp->tov = MBX_TOV_SECONDS;
2782 mcp->flags = 0;
2783 rval = qla2x00_mailbox_command(vha, mcp);
2785 if (rval != QLA_SUCCESS) {
2786 /*EMPTY*/
2787 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2788 "Failed mb[0]=%x.\n", mcp->mb[0]);
2789 } else {
2790 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2791 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2792 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2793 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2794 mcp->mb[11], mcp->mb[12]);
2796 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2797 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2798 ha->cur_fw_xcb_count = mcp->mb[3];
2799 ha->orig_fw_xcb_count = mcp->mb[6];
2800 ha->cur_fw_iocb_count = mcp->mb[7];
2801 ha->orig_fw_iocb_count = mcp->mb[10];
2802 if (ha->flags.npiv_supported)
2803 ha->max_npiv_vports = mcp->mb[11];
2804 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2805 ha->fw_max_fcf_count = mcp->mb[12];
2808 return (rval);
2812 * qla2x00_get_fcal_position_map
2813 * Get FCAL (LILP) position map using mailbox command
2815 * Input:
2816 * ha = adapter state pointer.
2817 * pos_map = buffer pointer (can be NULL).
2819 * Returns:
2820 * qla2x00 local function return status code.
2822 * Context:
2823 * Kernel context.
2826 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2828 int rval;
2829 mbx_cmd_t mc;
2830 mbx_cmd_t *mcp = &mc;
2831 char *pmap;
2832 dma_addr_t pmap_dma;
2833 struct qla_hw_data *ha = vha->hw;
2835 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2836 "Entered %s.\n", __func__);
2838 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2839 if (pmap == NULL) {
2840 ql_log(ql_log_warn, vha, 0x1080,
2841 "Memory alloc failed.\n");
2842 return QLA_MEMORY_ALLOC_FAILED;
2845 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2846 mcp->mb[2] = MSW(pmap_dma);
2847 mcp->mb[3] = LSW(pmap_dma);
2848 mcp->mb[6] = MSW(MSD(pmap_dma));
2849 mcp->mb[7] = LSW(MSD(pmap_dma));
2850 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2851 mcp->in_mb = MBX_1|MBX_0;
2852 mcp->buf_size = FCAL_MAP_SIZE;
2853 mcp->flags = MBX_DMA_IN;
2854 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2855 rval = qla2x00_mailbox_command(vha, mcp);
2857 if (rval == QLA_SUCCESS) {
2858 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2859 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2860 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2861 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2862 pmap, pmap[0] + 1);
2864 if (pos_map)
2865 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2867 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2869 if (rval != QLA_SUCCESS) {
2870 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2871 } else {
2872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2873 "Done %s.\n", __func__);
2876 return rval;
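/*
 * Standalone sketch of the LILP map layout implied above: byte 0 holds
 * the number of valid AL_PA entries (hence the pmap[0] + 1 byte dump),
 * followed by the AL_PAs themselves.  EX_FCAL_MAP_SIZE is a local
 * stand-in for FCAL_MAP_SIZE; illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_FCAL_MAP_SIZE	128

static void print_pos_map(const uint8_t pmap[EX_FCAL_MAP_SIZE])
{
	unsigned int i, n = pmap[0];

	printf("%u port(s) in loop map\n", n);
	for (i = 1; i <= n && i < EX_FCAL_MAP_SIZE; i++)
		printf("  AL_PA 0x%02x\n", pmap[i]);
}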
2880 * qla2x00_get_link_status
2882 * Input:
2883 * ha = adapter block pointer.
2884 * loop_id = device loop ID.
2885 * ret_buf = pointer to link status return buffer.
2887 * Returns:
2888 * 0 = success.
2889 * BIT_0 = mem alloc error.
2890 * BIT_1 = mailbox error.
2893 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2894 struct link_statistics *stats, dma_addr_t stats_dma)
2896 int rval;
2897 mbx_cmd_t mc;
2898 mbx_cmd_t *mcp = &mc;
2899 uint32_t *iter = (void *)stats;
2900 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2901 struct qla_hw_data *ha = vha->hw;
2903 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2904 "Entered %s.\n", __func__);
2906 mcp->mb[0] = MBC_GET_LINK_STATUS;
2907 mcp->mb[2] = MSW(LSD(stats_dma));
2908 mcp->mb[3] = LSW(LSD(stats_dma));
2909 mcp->mb[6] = MSW(MSD(stats_dma));
2910 mcp->mb[7] = LSW(MSD(stats_dma));
2911 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2912 mcp->in_mb = MBX_0;
2913 if (IS_FWI2_CAPABLE(ha)) {
2914 mcp->mb[1] = loop_id;
2915 mcp->mb[4] = 0;
2916 mcp->mb[10] = 0;
2917 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2918 mcp->in_mb |= MBX_1;
2919 } else if (HAS_EXTENDED_IDS(ha)) {
2920 mcp->mb[1] = loop_id;
2921 mcp->mb[10] = 0;
2922 mcp->out_mb |= MBX_10|MBX_1;
2923 } else {
2924 mcp->mb[1] = loop_id << 8;
2925 mcp->out_mb |= MBX_1;
2927 mcp->tov = MBX_TOV_SECONDS;
2928 mcp->flags = IOCTL_CMD;
2929 rval = qla2x00_mailbox_command(vha, mcp);
2931 if (rval == QLA_SUCCESS) {
2932 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2933 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2934 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2935 rval = QLA_FUNCTION_FAILED;
2936 } else {
2937 /* Re-endianize - firmware data is le32. */
2938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2939 "Done %s.\n", __func__);
2940 for ( ; dwords--; iter++)
2941 le32_to_cpus(iter);
2943 } else {
2944 /* Failed. */
2945 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2948 return rval;
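/*
 * Userspace sketch of the re-endianize loop above: the firmware fills
 * the statistics buffer with little-endian 32-bit words, and each word
 * up to the link_up_cnt offset is converted in place.  le32toh() is
 * used here as a userspace stand-in for le32_to_cpus().
 */
#include <stdint.h>
#include <stddef.h>
#include <endian.h>

static void stats_to_cpu(uint32_t *buf, size_t dwords)
{
	for (; dwords--; buf++)
		*buf = le32toh(*buf);
}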
2952 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2953 dma_addr_t stats_dma, uint16_t options)
2955 int rval;
2956 mbx_cmd_t mc;
2957 mbx_cmd_t *mcp = &mc;
2958 uint32_t *iter, dwords;
2960 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2961 "Entered %s.\n", __func__);
2963 memset(&mc, 0, sizeof(mc));
2964 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2965 mc.mb[2] = MSW(stats_dma);
2966 mc.mb[3] = LSW(stats_dma);
2967 mc.mb[6] = MSW(MSD(stats_dma));
2968 mc.mb[7] = LSW(MSD(stats_dma));
2969 mc.mb[8] = sizeof(struct link_statistics) / 4;
2970 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2971 mc.mb[10] = cpu_to_le16(options);
2973 rval = qla24xx_send_mb_cmd(vha, &mc);
2975 if (rval == QLA_SUCCESS) {
2976 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2977 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2978 "Failed mb[0]=%x.\n", mcp->mb[0]);
2979 rval = QLA_FUNCTION_FAILED;
2980 } else {
2981 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2982 "Done %s.\n", __func__);
2983 /* Re-endianize - firmware data is le32. */
2984 dwords = sizeof(struct link_statistics) / 4;
2985 iter = &stats->link_fail_cnt;
2986 for ( ; dwords--; iter++)
2987 le32_to_cpus(iter);
2989 } else {
2990 /* Failed. */
2991 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2994 return rval;
2998 qla24xx_abort_command(srb_t *sp)
3000 int rval;
3001 unsigned long flags = 0;
3003 struct abort_entry_24xx *abt;
3004 dma_addr_t abt_dma;
3005 uint32_t handle;
3006 fc_port_t *fcport = sp->fcport;
3007 struct scsi_qla_host *vha = fcport->vha;
3008 struct qla_hw_data *ha = vha->hw;
3009 struct req_que *req = vha->req;
3011 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3012 "Entered %s.\n", __func__);
3014 if (vha->flags.qpairs_available && sp->qpair)
3015 req = sp->qpair->req;
3017 if (ql2xasynctmfenable)
3018 return qla24xx_async_abort_command(sp);
3020 spin_lock_irqsave(&ha->hardware_lock, flags);
3021 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3022 if (req->outstanding_cmds[handle] == sp)
3023 break;
3025 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3026 if (handle == req->num_outstanding_cmds) {
3027 /* Command not found. */
3028 return QLA_FUNCTION_FAILED;
3031 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3032 if (abt == NULL) {
3033 ql_log(ql_log_warn, vha, 0x108d,
3034 "Failed to allocate abort IOCB.\n");
3035 return QLA_MEMORY_ALLOC_FAILED;
3038 abt->entry_type = ABORT_IOCB_TYPE;
3039 abt->entry_count = 1;
3040 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3041 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3042 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3043 abt->port_id[0] = fcport->d_id.b.al_pa;
3044 abt->port_id[1] = fcport->d_id.b.area;
3045 abt->port_id[2] = fcport->d_id.b.domain;
3046 abt->vp_index = fcport->vha->vp_idx;
3048 abt->req_que_no = cpu_to_le16(req->id);
3050 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3051 if (rval != QLA_SUCCESS) {
3052 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3053 "Failed to issue IOCB (%x).\n", rval);
3054 } else if (abt->entry_status != 0) {
3055 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3056 "Failed to complete IOCB -- error status (%x).\n",
3057 abt->entry_status);
3058 rval = QLA_FUNCTION_FAILED;
3059 } else if (abt->nport_handle != cpu_to_le16(0)) {
3060 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3061 "Failed to complete IOCB -- completion status (%x).\n",
3062 le16_to_cpu(abt->nport_handle));
3063 if (abt->nport_handle == CS_IOCB_ERROR)
3064 rval = QLA_FUNCTION_PARAMETER_ERROR;
3065 else
3066 rval = QLA_FUNCTION_FAILED;
3067 } else {
3068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3069 "Done %s.\n", __func__);
3072 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3074 return rval;
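/*
 * Sketch of the handle lookup above: the abort IOCB carries the
 * firmware handle of the command being aborted, so the request queue's
 * outstanding-command table is scanned for the srb pointer (handle 0
 * is reserved).  Standalone illustration, not driver code.
 */
#include <stddef.h>

static size_t find_handle(void *const outstanding[], size_t num_outstanding,
			  const void *sp)
{
	size_t handle;

	for (handle = 1; handle < num_outstanding; handle++)
		if (outstanding[handle] == sp)
			break;
	return handle;	/* == num_outstanding when not found */
}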
3077 struct tsk_mgmt_cmd {
3078 union {
3079 struct tsk_mgmt_entry tsk;
3080 struct sts_entry_24xx sts;
3081 } p;
3084 static int
3085 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3086 uint64_t l, int tag)
3088 int rval, rval2;
3089 struct tsk_mgmt_cmd *tsk;
3090 struct sts_entry_24xx *sts;
3091 dma_addr_t tsk_dma;
3092 scsi_qla_host_t *vha;
3093 struct qla_hw_data *ha;
3094 struct req_que *req;
3095 struct rsp_que *rsp;
3096 struct qla_qpair *qpair;
3098 vha = fcport->vha;
3099 ha = vha->hw;
3100 req = vha->req;
3102 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3103 "Entered %s.\n", __func__);
3105 if (vha->vp_idx && vha->qpair) {
3106 /* NPIV port */
3107 qpair = vha->qpair;
3108 rsp = qpair->rsp;
3109 req = qpair->req;
3110 } else {
3111 rsp = req->rsp;
3114 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3115 if (tsk == NULL) {
3116 ql_log(ql_log_warn, vha, 0x1093,
3117 "Failed to allocate task management IOCB.\n");
3118 return QLA_MEMORY_ALLOC_FAILED;
3121 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3122 tsk->p.tsk.entry_count = 1;
3123 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3124 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3125 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3126 tsk->p.tsk.control_flags = cpu_to_le32(type);
3127 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3128 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3129 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3130 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3131 if (type == TCF_LUN_RESET) {
3132 int_to_scsilun(l, &tsk->p.tsk.lun);
3133 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3134 sizeof(tsk->p.tsk.lun));
3137 sts = &tsk->p.sts;
3138 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3139 if (rval != QLA_SUCCESS) {
3140 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3141 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3142 } else if (sts->entry_status != 0) {
3143 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3144 "Failed to complete IOCB -- error status (%x).\n",
3145 sts->entry_status);
3146 rval = QLA_FUNCTION_FAILED;
3147 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3148 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3149 "Failed to complete IOCB -- completion status (%x).\n",
3150 le16_to_cpu(sts->comp_status));
3151 rval = QLA_FUNCTION_FAILED;
3152 } else if (le16_to_cpu(sts->scsi_status) &
3153 SS_RESPONSE_INFO_LEN_VALID) {
3154 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3155 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3156 "Ignoring inconsistent data length -- not enough "
3157 "response info (%d).\n",
3158 le32_to_cpu(sts->rsp_data_len));
3159 } else if (sts->data[3]) {
3160 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3161 "Failed to complete IOCB -- response (%x).\n",
3162 sts->data[3]);
3163 rval = QLA_FUNCTION_FAILED;
3167 /* Issue marker IOCB. */
3168 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3169 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3170 if (rval2 != QLA_SUCCESS) {
3171 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3172 "Failed to issue marker IOCB (%x).\n", rval2);
3173 } else {
3174 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3175 "Done %s.\n", __func__);
3178 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3180 return rval;
3184 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3186 struct qla_hw_data *ha = fcport->vha->hw;
3188 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3189 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3191 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3195 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3197 struct qla_hw_data *ha = fcport->vha->hw;
3199 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3200 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3202 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3206 qla2x00_system_error(scsi_qla_host_t *vha)
3208 int rval;
3209 mbx_cmd_t mc;
3210 mbx_cmd_t *mcp = &mc;
3211 struct qla_hw_data *ha = vha->hw;
3213 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3214 return QLA_FUNCTION_FAILED;
3216 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3217 "Entered %s.\n", __func__);
3219 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3220 mcp->out_mb = MBX_0;
3221 mcp->in_mb = MBX_0;
3222 mcp->tov = 5;
3223 mcp->flags = 0;
3224 rval = qla2x00_mailbox_command(vha, mcp);
3226 if (rval != QLA_SUCCESS) {
3227 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3228 } else {
3229 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3230 "Done %s.\n", __func__);
3233 return rval;
3237 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3239 int rval;
3240 mbx_cmd_t mc;
3241 mbx_cmd_t *mcp = &mc;
3243 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3244 !IS_QLA27XX(vha->hw))
3245 return QLA_FUNCTION_FAILED;
3247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3248 "Entered %s.\n", __func__);
3250 mcp->mb[0] = MBC_WRITE_SERDES;
3251 mcp->mb[1] = addr;
3252 if (IS_QLA2031(vha->hw))
3253 mcp->mb[2] = data & 0xff;
3254 else
3255 mcp->mb[2] = data;
3257 mcp->mb[3] = 0;
3258 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3259 mcp->in_mb = MBX_0;
3260 mcp->tov = MBX_TOV_SECONDS;
3261 mcp->flags = 0;
3262 rval = qla2x00_mailbox_command(vha, mcp);
3264 if (rval != QLA_SUCCESS) {
3265 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3266 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3267 } else {
3268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3269 "Done %s.\n", __func__);
3272 return rval;
3276 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3278 int rval;
3279 mbx_cmd_t mc;
3280 mbx_cmd_t *mcp = &mc;
3282 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3283 !IS_QLA27XX(vha->hw))
3284 return QLA_FUNCTION_FAILED;
3286 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3287 "Entered %s.\n", __func__);
3289 mcp->mb[0] = MBC_READ_SERDES;
3290 mcp->mb[1] = addr;
3291 mcp->mb[3] = 0;
3292 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3293 mcp->in_mb = MBX_1|MBX_0;
3294 mcp->tov = MBX_TOV_SECONDS;
3295 mcp->flags = 0;
3296 rval = qla2x00_mailbox_command(vha, mcp);
3298 if (IS_QLA2031(vha->hw))
3299 *data = mcp->mb[1] & 0xff;
3300 else
3301 *data = mcp->mb[1];
3303 if (rval != QLA_SUCCESS) {
3304 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3305 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3306 } else {
3307 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3308 "Done %s.\n", __func__);
3311 return rval;
3315 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3317 int rval;
3318 mbx_cmd_t mc;
3319 mbx_cmd_t *mcp = &mc;
3321 if (!IS_QLA8044(vha->hw))
3322 return QLA_FUNCTION_FAILED;
3324 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3325 "Entered %s.\n", __func__);
3327 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3328 mcp->mb[1] = HCS_WRITE_SERDES;
3329 mcp->mb[3] = LSW(addr);
3330 mcp->mb[4] = MSW(addr);
3331 mcp->mb[5] = LSW(data);
3332 mcp->mb[6] = MSW(data);
3333 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3334 mcp->in_mb = MBX_0;
3335 mcp->tov = MBX_TOV_SECONDS;
3336 mcp->flags = 0;
3337 rval = qla2x00_mailbox_command(vha, mcp);
3339 if (rval != QLA_SUCCESS) {
3340 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3341 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3342 } else {
3343 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3344 "Done %s.\n", __func__);
3347 return rval;
3351 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3353 int rval;
3354 mbx_cmd_t mc;
3355 mbx_cmd_t *mcp = &mc;
3357 if (!IS_QLA8044(vha->hw))
3358 return QLA_FUNCTION_FAILED;
3360 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3361 "Entered %s.\n", __func__);
3363 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3364 mcp->mb[1] = HCS_READ_SERDES;
3365 mcp->mb[3] = LSW(addr);
3366 mcp->mb[4] = MSW(addr);
3367 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3368 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3369 mcp->tov = MBX_TOV_SECONDS;
3370 mcp->flags = 0;
3371 rval = qla2x00_mailbox_command(vha, mcp);
3373 *data = mcp->mb[2] << 16 | mcp->mb[1];
3375 if (rval != QLA_SUCCESS) {
3376 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3377 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3378 } else {
3379 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3380 "Done %s.\n", __func__);
3383 return rval;
3387 * qla2x00_set_serdes_params() - Set adapter serdes parameters.
3388 * @ha: HA context
3390 * Returns qla2x00 local function return status code.
3393 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3394 uint16_t sw_em_2g, uint16_t sw_em_4g)
3396 int rval;
3397 mbx_cmd_t mc;
3398 mbx_cmd_t *mcp = &mc;
3400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3401 "Entered %s.\n", __func__);
3403 mcp->mb[0] = MBC_SERDES_PARAMS;
3404 mcp->mb[1] = BIT_0;
3405 mcp->mb[2] = sw_em_1g | BIT_15;
3406 mcp->mb[3] = sw_em_2g | BIT_15;
3407 mcp->mb[4] = sw_em_4g | BIT_15;
3408 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3409 mcp->in_mb = MBX_0;
3410 mcp->tov = MBX_TOV_SECONDS;
3411 mcp->flags = 0;
3412 rval = qla2x00_mailbox_command(vha, mcp);
3414 if (rval != QLA_SUCCESS) {
3415 /*EMPTY*/
3416 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3417 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3418 } else {
3419 /*EMPTY*/
3420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3421 "Done %s.\n", __func__);
3424 return rval;
3428 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3430 int rval;
3431 mbx_cmd_t mc;
3432 mbx_cmd_t *mcp = &mc;
3434 if (!IS_FWI2_CAPABLE(vha->hw))
3435 return QLA_FUNCTION_FAILED;
3437 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3438 "Entered %s.\n", __func__);
3440 mcp->mb[0] = MBC_STOP_FIRMWARE;
3441 mcp->mb[1] = 0;
3442 mcp->out_mb = MBX_1|MBX_0;
3443 mcp->in_mb = MBX_0;
3444 mcp->tov = 5;
3445 mcp->flags = 0;
3446 rval = qla2x00_mailbox_command(vha, mcp);
3448 if (rval != QLA_SUCCESS) {
3449 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3450 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3451 rval = QLA_INVALID_COMMAND;
3452 } else {
3453 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3454 "Done %s.\n", __func__);
3457 return rval;
3461 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3462 uint16_t buffers)
3464 int rval;
3465 mbx_cmd_t mc;
3466 mbx_cmd_t *mcp = &mc;
3468 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3469 "Entered %s.\n", __func__);
3471 if (!IS_FWI2_CAPABLE(vha->hw))
3472 return QLA_FUNCTION_FAILED;
3474 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3475 return QLA_FUNCTION_FAILED;
3477 mcp->mb[0] = MBC_TRACE_CONTROL;
3478 mcp->mb[1] = TC_EFT_ENABLE;
3479 mcp->mb[2] = LSW(eft_dma);
3480 mcp->mb[3] = MSW(eft_dma);
3481 mcp->mb[4] = LSW(MSD(eft_dma));
3482 mcp->mb[5] = MSW(MSD(eft_dma));
3483 mcp->mb[6] = buffers;
3484 mcp->mb[7] = TC_AEN_DISABLE;
3485 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3486 mcp->in_mb = MBX_1|MBX_0;
3487 mcp->tov = MBX_TOV_SECONDS;
3488 mcp->flags = 0;
3489 rval = qla2x00_mailbox_command(vha, mcp);
3490 if (rval != QLA_SUCCESS) {
3491 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3492 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3493 rval, mcp->mb[0], mcp->mb[1]);
3494 } else {
3495 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3496 "Done %s.\n", __func__);
3499 return rval;
3503 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3505 int rval;
3506 mbx_cmd_t mc;
3507 mbx_cmd_t *mcp = &mc;
3509 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3510 "Entered %s.\n", __func__);
3512 if (!IS_FWI2_CAPABLE(vha->hw))
3513 return QLA_FUNCTION_FAILED;
3515 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3516 return QLA_FUNCTION_FAILED;
3518 mcp->mb[0] = MBC_TRACE_CONTROL;
3519 mcp->mb[1] = TC_EFT_DISABLE;
3520 mcp->out_mb = MBX_1|MBX_0;
3521 mcp->in_mb = MBX_1|MBX_0;
3522 mcp->tov = MBX_TOV_SECONDS;
3523 mcp->flags = 0;
3524 rval = qla2x00_mailbox_command(vha, mcp);
3525 if (rval != QLA_SUCCESS) {
3526 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3527 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3528 rval, mcp->mb[0], mcp->mb[1]);
3529 } else {
3530 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3531 "Done %s.\n", __func__);
3534 return rval;
3538 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3539 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3541 int rval;
3542 mbx_cmd_t mc;
3543 mbx_cmd_t *mcp = &mc;
3545 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3546 "Entered %s.\n", __func__);
3548 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3549 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3550 return QLA_FUNCTION_FAILED;
3552 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3553 return QLA_FUNCTION_FAILED;
3555 mcp->mb[0] = MBC_TRACE_CONTROL;
3556 mcp->mb[1] = TC_FCE_ENABLE;
3557 mcp->mb[2] = LSW(fce_dma);
3558 mcp->mb[3] = MSW(fce_dma);
3559 mcp->mb[4] = LSW(MSD(fce_dma));
3560 mcp->mb[5] = MSW(MSD(fce_dma));
3561 mcp->mb[6] = buffers;
3562 mcp->mb[7] = TC_AEN_DISABLE;
3563 mcp->mb[8] = 0;
3564 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3565 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3566 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3567 MBX_1|MBX_0;
3568 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3569 mcp->tov = MBX_TOV_SECONDS;
3570 mcp->flags = 0;
3571 rval = qla2x00_mailbox_command(vha, mcp);
3572 if (rval != QLA_SUCCESS) {
3573 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3574 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3575 rval, mcp->mb[0], mcp->mb[1]);
3576 } else {
3577 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3578 "Done %s.\n", __func__);
3580 if (mb)
3581 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3582 if (dwords)
3583 *dwords = buffers;
3586 return rval;
3590 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3592 int rval;
3593 mbx_cmd_t mc;
3594 mbx_cmd_t *mcp = &mc;
3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3597 "Entered %s.\n", __func__);
3599 if (!IS_FWI2_CAPABLE(vha->hw))
3600 return QLA_FUNCTION_FAILED;
3602 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3603 return QLA_FUNCTION_FAILED;
3605 mcp->mb[0] = MBC_TRACE_CONTROL;
3606 mcp->mb[1] = TC_FCE_DISABLE;
3607 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3608 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3609 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3610 MBX_1|MBX_0;
3611 mcp->tov = MBX_TOV_SECONDS;
3612 mcp->flags = 0;
3613 rval = qla2x00_mailbox_command(vha, mcp);
3614 if (rval != QLA_SUCCESS) {
3615 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3616 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3617 rval, mcp->mb[0], mcp->mb[1]);
3618 } else {
3619 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3620 "Done %s.\n", __func__);
3622 if (wr)
3623 *wr = (uint64_t) mcp->mb[5] << 48 |
3624 (uint64_t) mcp->mb[4] << 32 |
3625 (uint64_t) mcp->mb[3] << 16 |
3626 (uint64_t) mcp->mb[2];
3627 if (rd)
3628 *rd = (uint64_t) mcp->mb[9] << 48 |
3629 (uint64_t) mcp->mb[8] << 32 |
3630 (uint64_t) mcp->mb[7] << 16 |
3631 (uint64_t) mcp->mb[6];
3634 return rval;
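/*
 * Standalone sketch of the pointer reassembly above: the FCE write and
 * read pointers come back as four 16-bit mailbox words each, least
 * significant word in the lowest-numbered register of the group.
 */
#include <stdint.h>

static uint64_t fce_ptr_from_mb(uint16_t w0, uint16_t w1, uint16_t w2,
				uint16_t w3)
{
	return (uint64_t)w3 << 48 | (uint64_t)w2 << 32 |
	       (uint64_t)w1 << 16 | (uint64_t)w0;
}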
3638 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3639 uint16_t *port_speed, uint16_t *mb)
3641 int rval;
3642 mbx_cmd_t mc;
3643 mbx_cmd_t *mcp = &mc;
3645 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3646 "Entered %s.\n", __func__);
3648 if (!IS_IIDMA_CAPABLE(vha->hw))
3649 return QLA_FUNCTION_FAILED;
3651 mcp->mb[0] = MBC_PORT_PARAMS;
3652 mcp->mb[1] = loop_id;
3653 mcp->mb[2] = mcp->mb[3] = 0;
3654 mcp->mb[9] = vha->vp_idx;
3655 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3656 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3657 mcp->tov = MBX_TOV_SECONDS;
3658 mcp->flags = 0;
3659 rval = qla2x00_mailbox_command(vha, mcp);
3661 /* Return mailbox statuses. */
3662 if (mb != NULL) {
3663 mb[0] = mcp->mb[0];
3664 mb[1] = mcp->mb[1];
3665 mb[3] = mcp->mb[3];
3668 if (rval != QLA_SUCCESS) {
3669 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3670 } else {
3671 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3672 "Done %s.\n", __func__);
3673 if (port_speed)
3674 *port_speed = mcp->mb[3];
3677 return rval;
3681 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3682 uint16_t port_speed, uint16_t *mb)
3684 int rval;
3685 mbx_cmd_t mc;
3686 mbx_cmd_t *mcp = &mc;
3688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3689 "Entered %s.\n", __func__);
3691 if (!IS_IIDMA_CAPABLE(vha->hw))
3692 return QLA_FUNCTION_FAILED;
3694 mcp->mb[0] = MBC_PORT_PARAMS;
3695 mcp->mb[1] = loop_id;
3696 mcp->mb[2] = BIT_0;
3697 if (IS_CNA_CAPABLE(vha->hw))
3698 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3699 else
3700 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3701 mcp->mb[9] = vha->vp_idx;
3702 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3703 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3704 mcp->tov = MBX_TOV_SECONDS;
3705 mcp->flags = 0;
3706 rval = qla2x00_mailbox_command(vha, mcp);
3708 /* Return mailbox statuses. */
3709 if (mb != NULL) {
3710 mb[0] = mcp->mb[0];
3711 mb[1] = mcp->mb[1];
3712 mb[3] = mcp->mb[3];
3715 if (rval != QLA_SUCCESS) {
3716 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3717 "Failed=%x.\n", rval);
3718 } else {
3719 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3720 "Done %s.\n", __func__);
3723 return rval;
3726 void
3727 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3728 struct vp_rpt_id_entry_24xx *rptid_entry)
3730 struct qla_hw_data *ha = vha->hw;
3731 scsi_qla_host_t *vp = NULL;
3732 unsigned long flags;
3733 int found;
3734 port_id_t id;
3735 struct fc_port *fcport;
3737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3738 "Entered %s.\n", __func__);
3740 if (rptid_entry->entry_status != 0)
3741 return;
3743 id.b.domain = rptid_entry->port_id[2];
3744 id.b.area = rptid_entry->port_id[1];
3745 id.b.al_pa = rptid_entry->port_id[0];
3746 id.b.rsvd_1 = 0;
3748 if (rptid_entry->format == 0) {
3749 /* loop */
3750 ql_dbg(ql_dbg_async, vha, 0x10b7,
3751 "Format 0 : Number of VPs setup %d, number of "
3752 "VPs acquired %d.\n", rptid_entry->vp_setup,
3753 rptid_entry->vp_acquired);
3754 ql_dbg(ql_dbg_async, vha, 0x10b8,
3755 "Primary port id %02x%02x%02x.\n",
3756 rptid_entry->port_id[2], rptid_entry->port_id[1],
3757 rptid_entry->port_id[0]);
3758 ha->current_topology = ISP_CFG_NL;
3759 qlt_update_host_map(vha, id);
3761 } else if (rptid_entry->format == 1) {
3762 /* fabric */
3763 ql_dbg(ql_dbg_async, vha, 0x10b9,
3764 "Format 1: VP[%d] enabled - status %d - with "
3765 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3766 rptid_entry->vp_status,
3767 rptid_entry->port_id[2], rptid_entry->port_id[1],
3768 rptid_entry->port_id[0]);
3769 ql_dbg(ql_dbg_async, vha, 0x5075,
3770 "Format 1: Remote WWPN %8phC.\n",
3771 rptid_entry->u.f1.port_name);
3773 ql_dbg(ql_dbg_async, vha, 0x5075,
3774 "Format 1: WWPN %8phC.\n",
3775 vha->port_name);
3777 /* N2N. direct connect */
3778 if (IS_QLA27XX(ha) &&
3779 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
3780 /* if our portname is higher than the remote's, initiate N2N login */
3781 if (wwn_to_u64(vha->port_name) >
3782 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3783 // ??? qlt_update_host_map(vha, id);
3784 vha->n2n_id = 0x1;
3785 ql_dbg(ql_dbg_async, vha, 0x5075,
3786 "Format 1: Setting n2n_update_needed for id %d\n",
3787 vha->n2n_id);
3788 } else {
3789 ql_dbg(ql_dbg_async, vha, 0x5075,
3790 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3791 rptid_entry->u.f1.port_name);
3794 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
3795 WWN_SIZE);
3796 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3797 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3798 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3799 return;
3802 ha->flags.gpsc_supported = 1;
3803 ha->current_topology = ISP_CFG_F;
3804 /* buffer to buffer credit flag */
3805 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3807 if (rptid_entry->vp_idx == 0) {
3808 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3809 /* FA-WWN is only for physical port */
3810 if (qla_ini_mode_enabled(vha) &&
3811 ha->flags.fawwpn_enabled &&
3812 (rptid_entry->u.f1.flags &
3813 BIT_6)) {
3814 memcpy(vha->port_name,
3815 rptid_entry->u.f1.port_name,
3816 WWN_SIZE);
3819 qlt_update_host_map(vha, id);
3822 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3823 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3824 } else {
3825 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3826 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3827 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3828 "Could not acquire ID for VP[%d].\n",
3829 rptid_entry->vp_idx);
3830 return;
3833 found = 0;
3834 spin_lock_irqsave(&ha->vport_slock, flags);
3835 list_for_each_entry(vp, &ha->vp_list, list) {
3836 if (rptid_entry->vp_idx == vp->vp_idx) {
3837 found = 1;
3838 break;
3841 spin_unlock_irqrestore(&ha->vport_slock, flags);
3843 if (!found)
3844 return;
3846 qlt_update_host_map(vp, id);
3849 * Cannot configure here as we are still sitting on the
3850 * response queue. Handle it in dpc context.
3852 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3853 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3854 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3856 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3857 qla2xxx_wake_dpc(vha);
3858 } else if (rptid_entry->format == 2) {
3859 ql_dbg(ql_dbg_async, vha, 0x505f,
3860 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3861 rptid_entry->port_id[2], rptid_entry->port_id[1],
3862 rptid_entry->port_id[0]);
3864 ql_dbg(ql_dbg_async, vha, 0x5075,
3865 "N2N: Remote WWPN %8phC.\n",
3866 rptid_entry->u.f2.port_name);
3868 /* N2N. direct connect */
3869 ha->current_topology = ISP_CFG_N;
3870 ha->flags.rida_fmt2 = 1;
3871 vha->d_id.b.domain = rptid_entry->port_id[2];
3872 vha->d_id.b.area = rptid_entry->port_id[1];
3873 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3875 spin_lock_irqsave(&ha->vport_slock, flags);
3876 qlt_update_vp_map(vha, SET_AL_PA);
3877 spin_unlock_irqrestore(&ha->vport_slock, flags);
3879 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3880 fcport->scan_state = QLA_FCPORT_SCAN;
3883 fcport = qla2x00_find_fcport_by_wwpn(vha,
3884 rptid_entry->u.f2.port_name, 1);
3886 if (fcport) {
3887 fcport->plogi_nack_done_deadline = jiffies + HZ;
3888 fcport->scan_state = QLA_FCPORT_FOUND;
3889 switch (fcport->disc_state) {
3890 case DSC_DELETED:
3891 ql_dbg(ql_dbg_disc, vha, 0x210d,
3892 "%s %d %8phC login\n",
3893 __func__, __LINE__, fcport->port_name);
3894 qla24xx_fcport_handle_login(vha, fcport);
3895 break;
3896 case DSC_DELETE_PEND:
3897 break;
3898 default:
3899 qlt_schedule_sess_for_deletion(fcport);
3900 break;
3902 } else {
3903 id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0];
3904 id.b.area = rptid_entry->u.f2.remote_nport_id[1];
3905 id.b.domain = rptid_entry->u.f2.remote_nport_id[2];
3906 qla24xx_post_newsess_work(vha, &id,
3907 rptid_entry->u.f2.port_name,
3908 rptid_entry->u.f2.node_name,
3909 NULL,
3910 FC4_TYPE_UNKNOWN);
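/*
 * Standalone equivalent of the wwn_to_u64() comparison above: a WWPN
 * is eight big-endian bytes, and the port with the numerically larger
 * name initiates the N2N login.  Illustrative helpers, not driver code.
 */
#include <stdint.h>

static uint64_t ex_wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = v << 8 | wwn[i];
	return v;
}

static int should_initiate_n2n_login(const uint8_t local_wwpn[8],
				     const uint8_t remote_wwpn[8])
{
	return ex_wwn_to_u64(local_wwpn) > ex_wwn_to_u64(remote_wwpn);
}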
3916 * qla24xx_modify_vp_config
3917 * Change VP configuration for vha
3919 * Input:
3920 * vha = adapter block pointer.
3922 * Returns:
3923 * qla2xxx local function return status code.
3925 * Context:
3926 * Kernel context.
3929 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3931 int rval;
3932 struct vp_config_entry_24xx *vpmod;
3933 dma_addr_t vpmod_dma;
3934 struct qla_hw_data *ha = vha->hw;
3935 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3937 /* This can be called by the parent */
3939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3940 "Entered %s.\n", __func__);
3942 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3943 if (!vpmod) {
3944 ql_log(ql_log_warn, vha, 0x10bc,
3945 "Failed to allocate modify VP IOCB.\n");
3946 return QLA_MEMORY_ALLOC_FAILED;
3949 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3950 vpmod->entry_count = 1;
3951 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3952 vpmod->vp_count = 1;
3953 vpmod->vp_index1 = vha->vp_idx;
3954 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3956 qlt_modify_vp_config(vha, vpmod);
3958 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3959 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3960 vpmod->entry_count = 1;
3962 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3963 if (rval != QLA_SUCCESS) {
3964 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3965 "Failed to issue VP config IOCB (%x).\n", rval);
3966 } else if (vpmod->entry_status != 0) {
3967 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3968 "Failed to complete IOCB -- error status (%x).\n",
3969 vpmod->entry_status);
3970 rval = QLA_FUNCTION_FAILED;
3971 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3972 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3973 "Failed to complete IOCB -- completion status (%x).\n",
3974 le16_to_cpu(vpmod->comp_status));
3975 rval = QLA_FUNCTION_FAILED;
3976 } else {
3977 /* EMPTY */
3978 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3979 "Done %s.\n", __func__);
3980 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3982 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3984 return rval;
3988 * qla2x00_send_change_request
3989 * Receive or disable RSCN request from fabric controller
3991 * Input:
3992 * ha = adapter block pointer
3993 * format = registration format:
3994 * 0 - Reserved
3995 * 1 - Fabric detected registration
3996 * 2 - N_port detected registration
3997 * 3 - Full registration
3998 * FF - clear registration
3999 * vp_idx = Virtual port index
4001 * Returns:
4002 * qla2x00 local function return status code.
4004 * Context:
4005 * Kernel Context
4009 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4010 uint16_t vp_idx)
4012 int rval;
4013 mbx_cmd_t mc;
4014 mbx_cmd_t *mcp = &mc;
4016 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4017 "Entered %s.\n", __func__);
4019 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4020 mcp->mb[1] = format;
4021 mcp->mb[9] = vp_idx;
4022 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4023 mcp->in_mb = MBX_0|MBX_1;
4024 mcp->tov = MBX_TOV_SECONDS;
4025 mcp->flags = 0;
4026 rval = qla2x00_mailbox_command(vha, mcp);
4028 if (rval == QLA_SUCCESS) {
4029 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4030 rval = BIT_1;
4032 } else
4033 rval = BIT_1;
4035 return rval;
4039 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4040 uint32_t size)
4042 int rval;
4043 mbx_cmd_t mc;
4044 mbx_cmd_t *mcp = &mc;
4046 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4047 "Entered %s.\n", __func__);
4049 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4050 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4051 mcp->mb[8] = MSW(addr);
4052 mcp->out_mb = MBX_8|MBX_0;
4053 } else {
4054 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4055 mcp->out_mb = MBX_0;
4057 mcp->mb[1] = LSW(addr);
4058 mcp->mb[2] = MSW(req_dma);
4059 mcp->mb[3] = LSW(req_dma);
4060 mcp->mb[6] = MSW(MSD(req_dma));
4061 mcp->mb[7] = LSW(MSD(req_dma));
4062 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4063 if (IS_FWI2_CAPABLE(vha->hw)) {
4064 mcp->mb[4] = MSW(size);
4065 mcp->mb[5] = LSW(size);
4066 mcp->out_mb |= MBX_5|MBX_4;
4067 } else {
4068 mcp->mb[4] = LSW(size);
4069 mcp->out_mb |= MBX_4;
4072 mcp->in_mb = MBX_0;
4073 mcp->tov = MBX_TOV_SECONDS;
4074 mcp->flags = 0;
4075 rval = qla2x00_mailbox_command(vha, mcp);
4077 if (rval != QLA_SUCCESS) {
4078 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4080 } else {
4081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4082 "Done %s.\n", __func__);
4085 return rval;
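/*
 * Standalone sketch of the RISC address split above: the legacy dump
 * command only carries a 16-bit address in mb[1], so an address with a
 * non-zero high word has to use the extended command and place that
 * word in mb[8].  The EX2_* macros are local stand-ins for LSW()/MSW().
 */
#include <stdint.h>

#define EX2_LSW(x)	((uint16_t)((x) & 0xffff))
#define EX2_MSW(x)	((uint16_t)(((uint32_t)(x)) >> 16))

static int dump_addr_to_mb(uint32_t addr, uint16_t *mb1, uint16_t *mb8)
{
	*mb1 = EX2_LSW(addr);
	*mb8 = EX2_MSW(addr);
	return *mb8 != 0;	/* non-zero: extended command required */
}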
4087 /* 84XX Support **************************************************************/
4089 struct cs84xx_mgmt_cmd {
4090 union {
4091 struct verify_chip_entry_84xx req;
4092 struct verify_chip_rsp_84xx rsp;
4093 } p;
4097 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4099 int rval, retry;
4100 struct cs84xx_mgmt_cmd *mn;
4101 dma_addr_t mn_dma;
4102 uint16_t options;
4103 unsigned long flags;
4104 struct qla_hw_data *ha = vha->hw;
4106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4107 "Entered %s.\n", __func__);
4109 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4110 if (mn == NULL) {
4111 return QLA_MEMORY_ALLOC_FAILED;
4114 /* Force Update? */
4115 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4116 /* Diagnostic firmware? */
4117 /* options |= MENLO_DIAG_FW; */
4118 /* We update the firmware with only one data sequence. */
4119 options |= VCO_END_OF_DATA;
4121 do {
4122 retry = 0;
4123 memset(mn, 0, sizeof(*mn));
4124 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4125 mn->p.req.entry_count = 1;
4126 mn->p.req.options = cpu_to_le16(options);
4128 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4129 "Dump of Verify Request.\n");
4130 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4131 (uint8_t *)mn, sizeof(*mn));
4133 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4134 if (rval != QLA_SUCCESS) {
4135 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4136 "Failed to issue verify IOCB (%x).\n", rval);
4137 goto verify_done;
4140 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4141 "Dump of Verify Response.\n");
4142 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4143 (uint8_t *)mn, sizeof(*mn));
4145 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4146 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4147 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4148 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4149 "cs=%x fc=%x.\n", status[0], status[1]);
4151 if (status[0] != CS_COMPLETE) {
4152 rval = QLA_FUNCTION_FAILED;
4153 if (!(options & VCO_DONT_UPDATE_FW)) {
4154 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4155 "Firmware update failed. Retrying "
4156 "without update firmware.\n");
4157 options |= VCO_DONT_UPDATE_FW;
4158 options &= ~VCO_FORCE_UPDATE;
4159 retry = 1;
4161 } else {
4162 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4163 "Firmware updated to %x.\n",
4164 le32_to_cpu(mn->p.rsp.fw_ver));
4166 /* NOTE: we only update OP firmware. */
4167 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4168 ha->cs84xx->op_fw_version =
4169 le32_to_cpu(mn->p.rsp.fw_ver);
4170 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4171 flags);
4173 } while (retry);
4175 verify_done:
4176 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4178 if (rval != QLA_SUCCESS) {
4179 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4180 "Failed=%x.\n", rval);
4181 } else {
4182 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4183 "Done %s.\n", __func__);
4186 return rval;
4190 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4192 int rval;
4193 unsigned long flags;
4194 mbx_cmd_t mc;
4195 mbx_cmd_t *mcp = &mc;
4196 struct qla_hw_data *ha = vha->hw;
4198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4199 "Entered %s.\n", __func__);
4201 if (IS_SHADOW_REG_CAPABLE(ha))
4202 req->options |= BIT_13;
4204 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4205 mcp->mb[1] = req->options;
4206 mcp->mb[2] = MSW(LSD(req->dma));
4207 mcp->mb[3] = LSW(LSD(req->dma));
4208 mcp->mb[6] = MSW(MSD(req->dma));
4209 mcp->mb[7] = LSW(MSD(req->dma));
4210 mcp->mb[5] = req->length;
4211 if (req->rsp)
4212 mcp->mb[10] = req->rsp->id;
4213 mcp->mb[12] = req->qos;
4214 mcp->mb[11] = req->vp_idx;
4215 mcp->mb[13] = req->rid;
4216 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4217 mcp->mb[15] = 0;
4219 mcp->mb[4] = req->id;
4220 /* queue in-pointer index */
4221 mcp->mb[8] = 0;
4222 /* queue out-pointer index */
4223 mcp->mb[9] = *req->out_ptr = 0;
4224 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4225 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4226 mcp->in_mb = MBX_0;
4227 mcp->flags = MBX_DMA_OUT;
4228 mcp->tov = MBX_TOV_SECONDS * 2;
4230 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4231 mcp->in_mb |= MBX_1;
4232 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4233 mcp->out_mb |= MBX_15;
4234 /* debug q create issue in SR-IOV */
4235 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4238 spin_lock_irqsave(&ha->hardware_lock, flags);
4239 if (!(req->options & BIT_0)) {
4240 WRT_REG_DWORD(req->req_q_in, 0);
4241 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4242 WRT_REG_DWORD(req->req_q_out, 0);
4244 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4246 rval = qla2x00_mailbox_command(vha, mcp);
4247 if (rval != QLA_SUCCESS) {
4248 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4249 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4250 } else {
4251 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4252 "Done %s.\n", __func__);
4255 return rval;
4259 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4261 int rval;
4262 unsigned long flags;
4263 mbx_cmd_t mc;
4264 mbx_cmd_t *mcp = &mc;
4265 struct qla_hw_data *ha = vha->hw;
4267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4268 "Entered %s.\n", __func__);
4270 if (IS_SHADOW_REG_CAPABLE(ha))
4271 rsp->options |= BIT_13;
4273 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4274 mcp->mb[1] = rsp->options;
4275 mcp->mb[2] = MSW(LSD(rsp->dma));
4276 mcp->mb[3] = LSW(LSD(rsp->dma));
4277 mcp->mb[6] = MSW(MSD(rsp->dma));
4278 mcp->mb[7] = LSW(MSD(rsp->dma));
4279 mcp->mb[5] = rsp->length;
4280 mcp->mb[14] = rsp->msix->entry;
4281 mcp->mb[13] = rsp->rid;
4282 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4283 mcp->mb[15] = 0;
4285 mcp->mb[4] = rsp->id;
4286 /* que in ptr index */
4287 mcp->mb[8] = *rsp->in_ptr = 0;
4288 /* que out ptr index */
4289 mcp->mb[9] = 0;
4290 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4291 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4292 mcp->in_mb = MBX_0;
4293 mcp->flags = MBX_DMA_OUT;
4294 mcp->tov = MBX_TOV_SECONDS * 2;
4296 if (IS_QLA81XX(ha)) {
4297 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4298 mcp->in_mb |= MBX_1;
4299 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4300 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4301 mcp->in_mb |= MBX_1;
4302 /* debug q create issue in SR-IOV */
4303 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4306 spin_lock_irqsave(&ha->hardware_lock, flags);
4307 if (!(rsp->options & BIT_0)) {
4308 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4309 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4310 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4313 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4315 rval = qla2x00_mailbox_command(vha, mcp);
4316 if (rval != QLA_SUCCESS) {
4317 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4318 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4319 } else {
4320 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4321 "Done %s.\n", __func__);
4324 return rval;
4328 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4330 int rval;
4331 mbx_cmd_t mc;
4332 mbx_cmd_t *mcp = &mc;
4334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4335 "Entered %s.\n", __func__);
4337 mcp->mb[0] = MBC_IDC_ACK;
4338 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4339 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4340 mcp->in_mb = MBX_0;
4341 mcp->tov = MBX_TOV_SECONDS;
4342 mcp->flags = 0;
4343 rval = qla2x00_mailbox_command(vha, mcp);
4345 if (rval != QLA_SUCCESS) {
4346 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4347 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4348 } else {
4349 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4350 "Done %s.\n", __func__);
4353 return rval;
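/*
 * Editor's note (illustrative sketch, not part of the original source): the
 * three routines below wrap MBC_FLASH_ACCESS_CTRL sub-commands on
 * ISP81xx/83xx/27xx parts.  A typical flash-erase sequence, assuming
 * 'start' and 'finish' are valid flash addresses supplied by the caller:
 *
 *	uint32_t sec_sz;
 *
 *	if (qla81xx_fac_get_sector_size(vha, &sec_sz) != QLA_SUCCESS)
 *		goto out;
 *	if (qla81xx_fac_do_write_enable(vha, 1) != QLA_SUCCESS)
 *		goto out;
 *	rval = qla81xx_fac_erase_sector(vha, start, finish);
 *	qla81xx_fac_do_write_enable(vha, 0);
 */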
4357 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4359 int rval;
4360 mbx_cmd_t mc;
4361 mbx_cmd_t *mcp = &mc;
4363 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4364 "Entered %s.\n", __func__);
4366 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4367 !IS_QLA27XX(vha->hw))
4368 return QLA_FUNCTION_FAILED;
4370 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4371 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4372 mcp->out_mb = MBX_1|MBX_0;
4373 mcp->in_mb = MBX_1|MBX_0;
4374 mcp->tov = MBX_TOV_SECONDS;
4375 mcp->flags = 0;
4376 rval = qla2x00_mailbox_command(vha, mcp);
4378 if (rval != QLA_SUCCESS) {
4379 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4380 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4381 rval, mcp->mb[0], mcp->mb[1]);
4382 } else {
4383 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4384 "Done %s.\n", __func__);
4385 *sector_size = mcp->mb[1];
4388 return rval;
4392 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4394 int rval;
4395 mbx_cmd_t mc;
4396 mbx_cmd_t *mcp = &mc;
4398 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4399 !IS_QLA27XX(vha->hw))
4400 return QLA_FUNCTION_FAILED;
4402 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4403 "Entered %s.\n", __func__);
4405 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4406 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4407 FAC_OPT_CMD_WRITE_PROTECT;
4408 mcp->out_mb = MBX_1|MBX_0;
4409 mcp->in_mb = MBX_1|MBX_0;
4410 mcp->tov = MBX_TOV_SECONDS;
4411 mcp->flags = 0;
4412 rval = qla2x00_mailbox_command(vha, mcp);
4414 if (rval != QLA_SUCCESS) {
4415 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4416 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4417 rval, mcp->mb[0], mcp->mb[1]);
4418 } else {
4419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4420 "Done %s.\n", __func__);
4423 return rval;
4427 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4429 int rval;
4430 mbx_cmd_t mc;
4431 mbx_cmd_t *mcp = &mc;
4433 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4434 !IS_QLA27XX(vha->hw))
4435 return QLA_FUNCTION_FAILED;
4437 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4438 "Entered %s.\n", __func__);
4440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4441 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4442 mcp->mb[2] = LSW(start);
4443 mcp->mb[3] = MSW(start);
4444 mcp->mb[4] = LSW(finish);
4445 mcp->mb[5] = MSW(finish);
4446 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4447 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4448 mcp->tov = MBX_TOV_SECONDS;
4449 mcp->flags = 0;
4450 rval = qla2x00_mailbox_command(vha, mcp);
4452 if (rval != QLA_SUCCESS) {
4453 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4454 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4455 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4456 } else {
4457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4458 "Done %s.\n", __func__);
4461 return rval;
4465 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4467 int rval = 0;
4468 mbx_cmd_t mc;
4469 mbx_cmd_t *mcp = &mc;
4471 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4472 "Entered %s.\n", __func__);
4474 mcp->mb[0] = MBC_RESTART_MPI_FW;
4475 mcp->out_mb = MBX_0;
4476 mcp->in_mb = MBX_0|MBX_1;
4477 mcp->tov = MBX_TOV_SECONDS;
4478 mcp->flags = 0;
4479 rval = qla2x00_mailbox_command(vha, mcp);
4481 if (rval != QLA_SUCCESS) {
4482 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4483 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4484 rval, mcp->mb[0], mcp->mb[1]);
4485 } else {
4486 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4487 "Done %s.\n", __func__);
4490 return rval;
4494 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4496 int rval;
4497 mbx_cmd_t mc;
4498 mbx_cmd_t *mcp = &mc;
4499 int i;
4500 int len;
4501 uint16_t *str;
4502 struct qla_hw_data *ha = vha->hw;
4504 if (!IS_P3P_TYPE(ha))
4505 return QLA_FUNCTION_FAILED;
4507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4508 "Entered %s.\n", __func__);
4510 str = (void *)version;
4511 len = strlen(version);
4513 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4514 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4515 mcp->out_mb = MBX_1|MBX_0;
4516 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4517 mcp->mb[i] = cpu_to_le16p(str);
4518 mcp->out_mb |= 1<<i;
4520 for (; i < 16; i++) {
4521 mcp->mb[i] = 0;
4522 mcp->out_mb |= 1<<i;
4524 mcp->in_mb = MBX_1|MBX_0;
4525 mcp->tov = MBX_TOV_SECONDS;
4526 mcp->flags = 0;
4527 rval = qla2x00_mailbox_command(vha, mcp);
4529 if (rval != QLA_SUCCESS) {
4530 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4531 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4532 } else {
4533 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4534 "Done %s.\n", __func__);
4537 return rval;
4541 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4543 int rval;
4544 mbx_cmd_t mc;
4545 mbx_cmd_t *mcp = &mc;
4546 int len;
4547 uint16_t dwlen;
4548 uint8_t *str;
4549 dma_addr_t str_dma;
4550 struct qla_hw_data *ha = vha->hw;
4552 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4553 IS_P3P_TYPE(ha))
4554 return QLA_FUNCTION_FAILED;
4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4557 "Entered %s.\n", __func__);
4559 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4560 if (!str) {
4561 ql_log(ql_log_warn, vha, 0x117f,
4562 "Failed to allocate driver version param.\n");
4563 return QLA_MEMORY_ALLOC_FAILED;
4566 memcpy(str, "\x7\x3\x11\x0", 4);
4567 dwlen = str[0];
4568 len = dwlen * 4 - 4;
4569 memset(str + 4, 0, len);
4570 if (len > strlen(version))
4571 len = strlen(version);
4572 memcpy(str + 4, version, len);
4574 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4575 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4576 mcp->mb[2] = MSW(LSD(str_dma));
4577 mcp->mb[3] = LSW(LSD(str_dma));
4578 mcp->mb[6] = MSW(MSD(str_dma));
4579 mcp->mb[7] = LSW(MSD(str_dma));
4580 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4581 mcp->in_mb = MBX_1|MBX_0;
4582 mcp->tov = MBX_TOV_SECONDS;
4583 mcp->flags = 0;
4584 rval = qla2x00_mailbox_command(vha, mcp);
4586 if (rval != QLA_SUCCESS) {
4587 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4588 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4589 } else {
4590 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4591 "Done %s.\n", __func__);
4594 dma_pool_free(ha->s_dma_pool, str, str_dma);
4596 return rval;
4600 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4601 void *buf, uint16_t bufsiz)
4603 int rval, i;
4604 mbx_cmd_t mc;
4605 mbx_cmd_t *mcp = &mc;
4606 uint32_t *bp;
4608 if (!IS_FWI2_CAPABLE(vha->hw))
4609 return QLA_FUNCTION_FAILED;
4611 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4612 "Entered %s.\n", __func__);
4614 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4615 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4616 mcp->mb[2] = MSW(buf_dma);
4617 mcp->mb[3] = LSW(buf_dma);
4618 mcp->mb[6] = MSW(MSD(buf_dma));
4619 mcp->mb[7] = LSW(MSD(buf_dma));
4620 mcp->mb[8] = bufsiz/4;
4621 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4622 mcp->in_mb = MBX_1|MBX_0;
4623 mcp->tov = MBX_TOV_SECONDS;
4624 mcp->flags = 0;
4625 rval = qla2x00_mailbox_command(vha, mcp);
4627 if (rval != QLA_SUCCESS) {
4628 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4629 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4630 } else {
4631 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4632 "Done %s.\n", __func__);
4633 bp = (uint32_t *) buf;
4634 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4635 *bp = cpu_to_be32(*bp);
4638 return rval;
4641 static int
4642 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4644 int rval;
4645 mbx_cmd_t mc;
4646 mbx_cmd_t *mcp = &mc;
4648 if (!IS_FWI2_CAPABLE(vha->hw))
4649 return QLA_FUNCTION_FAILED;
4651 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4652 "Entered %s.\n", __func__);
4654 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4655 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4656 mcp->out_mb = MBX_1|MBX_0;
4657 mcp->in_mb = MBX_1|MBX_0;
4658 mcp->tov = MBX_TOV_SECONDS;
4659 mcp->flags = 0;
4660 rval = qla2x00_mailbox_command(vha, mcp);
4661 *temp = mcp->mb[1];
4663 if (rval != QLA_SUCCESS) {
4664 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4665 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4666 } else {
4667 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4668 "Done %s.\n", __func__);
4671 return rval;
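/*
 * Editor's note (not in the original source): qla2x00_read_sfp() issues
 * MBC_READ_SFP against I2C device address 'dev' at offset 'off' for 'len'
 * bytes.  For len == 1 the routine sets BIT_0 in the options and the byte
 * is returned directly in mailbox 1 (stored through *sfp); larger reads
 * land in the caller-supplied DMA buffer.  A sketch modelled on the caller
 * qla2x00_read_sfp_dev() near the end of this file:
 *
 *	rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
 *	    0xa0, 0, SFP_BLOCK_SIZE, BIT_1);
 */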
4675 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4676 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4678 int rval;
4679 mbx_cmd_t mc;
4680 mbx_cmd_t *mcp = &mc;
4681 struct qla_hw_data *ha = vha->hw;
4683 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4684 "Entered %s.\n", __func__);
4686 if (!IS_FWI2_CAPABLE(ha))
4687 return QLA_FUNCTION_FAILED;
4689 if (len == 1)
4690 opt |= BIT_0;
4692 mcp->mb[0] = MBC_READ_SFP;
4693 mcp->mb[1] = dev;
4694 mcp->mb[2] = MSW(sfp_dma);
4695 mcp->mb[3] = LSW(sfp_dma);
4696 mcp->mb[6] = MSW(MSD(sfp_dma));
4697 mcp->mb[7] = LSW(MSD(sfp_dma));
4698 mcp->mb[8] = len;
4699 mcp->mb[9] = off;
4700 mcp->mb[10] = opt;
4701 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4702 mcp->in_mb = MBX_1|MBX_0;
4703 mcp->tov = MBX_TOV_SECONDS;
4704 mcp->flags = 0;
4705 rval = qla2x00_mailbox_command(vha, mcp);
4707 if (opt & BIT_0)
4708 *sfp = mcp->mb[1];
4710 if (rval != QLA_SUCCESS) {
4711 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4712 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4713 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4714 mcp->mb[1] == 0x22)
4715 /* SFP is not present. */
4716 rval = QLA_INTERFACE_ERROR;
4717 } else {
4718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4719 "Done %s.\n", __func__);
4722 return rval;
4726 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4727 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4729 int rval;
4730 mbx_cmd_t mc;
4731 mbx_cmd_t *mcp = &mc;
4732 struct qla_hw_data *ha = vha->hw;
4734 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4735 "Entered %s.\n", __func__);
4737 if (!IS_FWI2_CAPABLE(ha))
4738 return QLA_FUNCTION_FAILED;
4740 if (len == 1)
4741 opt |= BIT_0;
4743 if (opt & BIT_0)
4744 len = *sfp;
4746 mcp->mb[0] = MBC_WRITE_SFP;
4747 mcp->mb[1] = dev;
4748 mcp->mb[2] = MSW(sfp_dma);
4749 mcp->mb[3] = LSW(sfp_dma);
4750 mcp->mb[6] = MSW(MSD(sfp_dma));
4751 mcp->mb[7] = LSW(MSD(sfp_dma));
4752 mcp->mb[8] = len;
4753 mcp->mb[9] = off;
4754 mcp->mb[10] = opt;
4755 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4756 mcp->in_mb = MBX_1|MBX_0;
4757 mcp->tov = MBX_TOV_SECONDS;
4758 mcp->flags = 0;
4759 rval = qla2x00_mailbox_command(vha, mcp);
4761 if (rval != QLA_SUCCESS) {
4762 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4763 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4764 } else {
4765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4766 "Done %s.\n", __func__);
4769 return rval;
4773 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4774 uint16_t size_in_bytes, uint16_t *actual_size)
4776 int rval;
4777 mbx_cmd_t mc;
4778 mbx_cmd_t *mcp = &mc;
4780 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4781 "Entered %s.\n", __func__);
4783 if (!IS_CNA_CAPABLE(vha->hw))
4784 return QLA_FUNCTION_FAILED;
4786 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4787 mcp->mb[2] = MSW(stats_dma);
4788 mcp->mb[3] = LSW(stats_dma);
4789 mcp->mb[6] = MSW(MSD(stats_dma));
4790 mcp->mb[7] = LSW(MSD(stats_dma));
4791 mcp->mb[8] = size_in_bytes >> 2;
4792 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4793 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4794 mcp->tov = MBX_TOV_SECONDS;
4795 mcp->flags = 0;
4796 rval = qla2x00_mailbox_command(vha, mcp);
4798 if (rval != QLA_SUCCESS) {
4799 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4800 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4801 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4802 } else {
4803 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4804 "Done %s.\n", __func__);
4807 *actual_size = mcp->mb[2] << 2;
4810 return rval;
4814 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4815 uint16_t size)
4817 int rval;
4818 mbx_cmd_t mc;
4819 mbx_cmd_t *mcp = &mc;
4821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4822 "Entered %s.\n", __func__);
4824 if (!IS_CNA_CAPABLE(vha->hw))
4825 return QLA_FUNCTION_FAILED;
4827 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4828 mcp->mb[1] = 0;
4829 mcp->mb[2] = MSW(tlv_dma);
4830 mcp->mb[3] = LSW(tlv_dma);
4831 mcp->mb[6] = MSW(MSD(tlv_dma));
4832 mcp->mb[7] = LSW(MSD(tlv_dma));
4833 mcp->mb[8] = size;
4834 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4835 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4836 mcp->tov = MBX_TOV_SECONDS;
4837 mcp->flags = 0;
4838 rval = qla2x00_mailbox_command(vha, mcp);
4840 if (rval != QLA_SUCCESS) {
4841 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4842 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4843 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4844 } else {
4845 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4846 "Done %s.\n", __func__);
4849 return rval;
4853 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4855 int rval;
4856 mbx_cmd_t mc;
4857 mbx_cmd_t *mcp = &mc;
4859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4860 "Entered %s.\n", __func__);
4862 if (!IS_FWI2_CAPABLE(vha->hw))
4863 return QLA_FUNCTION_FAILED;
4865 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4866 mcp->mb[1] = LSW(risc_addr);
4867 mcp->mb[8] = MSW(risc_addr);
4868 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4869 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4870 mcp->tov = 30;
4871 mcp->flags = 0;
4872 rval = qla2x00_mailbox_command(vha, mcp);
4873 if (rval != QLA_SUCCESS) {
4874 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4875 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4876 } else {
4877 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4878 "Done %s.\n", __func__);
4879 *data = mcp->mb[3] << 16 | mcp->mb[2];
4882 return rval;
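/*
 * Editor's note (hypothetical caller, not part of the original source):
 * qla2x00_loopback_test() runs MBC_DIAGNOSTIC_LOOP_BACK using the DMA
 * addresses, transfer size and iteration count supplied in struct
 * msg_echo_lb, and copies 64 bytes of returned mailbox registers into
 * 'mresp'.  Assuming the send/receive buffers were mapped elsewhere:
 *
 *	struct msg_echo_lb elreq = {};
 *	uint16_t response[32];
 *
 *	elreq.send_dma = send_dma;
 *	elreq.rcv_dma = rcv_dma;
 *	elreq.transfer_size = len;
 *	elreq.iteration_count = 1;
 *	rval = qla2x00_loopback_test(vha, &elreq, response);
 */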
4886 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4887 uint16_t *mresp)
4889 int rval;
4890 mbx_cmd_t mc;
4891 mbx_cmd_t *mcp = &mc;
4893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4894 "Entered %s.\n", __func__);
4896 memset(mcp->mb, 0, sizeof(mcp->mb));
4897 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4898 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64-bit addressing */
4900 /* transfer count */
4901 mcp->mb[10] = LSW(mreq->transfer_size);
4902 mcp->mb[11] = MSW(mreq->transfer_size);
4904 /* send data address */
4905 mcp->mb[14] = LSW(mreq->send_dma);
4906 mcp->mb[15] = MSW(mreq->send_dma);
4907 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4908 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4910 /* receive data address */
4911 mcp->mb[16] = LSW(mreq->rcv_dma);
4912 mcp->mb[17] = MSW(mreq->rcv_dma);
4913 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4914 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4916 /* Iteration count */
4917 mcp->mb[18] = LSW(mreq->iteration_count);
4918 mcp->mb[19] = MSW(mreq->iteration_count);
4920 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4921 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4922 if (IS_CNA_CAPABLE(vha->hw))
4923 mcp->out_mb |= MBX_2;
4924 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4926 mcp->buf_size = mreq->transfer_size;
4927 mcp->tov = MBX_TOV_SECONDS;
4928 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4930 rval = qla2x00_mailbox_command(vha, mcp);
4932 if (rval != QLA_SUCCESS) {
4933 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4934 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4935 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4936 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4937 } else {
4938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4939 "Done %s.\n", __func__);
4942 /* Copy mailbox information */
4943 memcpy(mresp, mcp->mb, 64);
4944 return rval;
4948 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4949 uint16_t *mresp)
4951 int rval;
4952 mbx_cmd_t mc;
4953 mbx_cmd_t *mcp = &mc;
4954 struct qla_hw_data *ha = vha->hw;
4956 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4957 "Entered %s.\n", __func__);
4959 memset(mcp->mb, 0, sizeof(mcp->mb));
4960 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4961 /* BIT_6 specifies 64bit address */
4962 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
4963 if (IS_CNA_CAPABLE(ha)) {
4964 mcp->mb[2] = vha->fcoe_fcf_idx;
4966 mcp->mb[16] = LSW(mreq->rcv_dma);
4967 mcp->mb[17] = MSW(mreq->rcv_dma);
4968 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4969 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4971 mcp->mb[10] = LSW(mreq->transfer_size);
4973 mcp->mb[14] = LSW(mreq->send_dma);
4974 mcp->mb[15] = MSW(mreq->send_dma);
4975 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4976 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4978 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
4979 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4980 if (IS_CNA_CAPABLE(ha))
4981 mcp->out_mb |= MBX_2;
4983 mcp->in_mb = MBX_0;
4984 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
4985 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4986 mcp->in_mb |= MBX_1;
4987 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
4988 mcp->in_mb |= MBX_3;
4990 mcp->tov = MBX_TOV_SECONDS;
4991 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4992 mcp->buf_size = mreq->transfer_size;
4994 rval = qla2x00_mailbox_command(vha, mcp);
4996 if (rval != QLA_SUCCESS) {
4997 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
4998 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4999 rval, mcp->mb[0], mcp->mb[1]);
5000 } else {
5001 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5002 "Done %s.\n", __func__);
5005 /* Copy mailbox information */
5006 memcpy(mresp, mcp->mb, 64);
5007 return rval;
5011 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5013 int rval;
5014 mbx_cmd_t mc;
5015 mbx_cmd_t *mcp = &mc;
5017 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5018 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5020 mcp->mb[0] = MBC_ISP84XX_RESET;
5021 mcp->mb[1] = enable_diagnostic;
5022 mcp->out_mb = MBX_1|MBX_0;
5023 mcp->in_mb = MBX_1|MBX_0;
5024 mcp->tov = MBX_TOV_SECONDS;
5025 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5026 rval = qla2x00_mailbox_command(vha, mcp);
5028 if (rval != QLA_SUCCESS)
5029 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5030 else
5031 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5032 "Done %s.\n", __func__);
5034 return rval;
5038 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5040 int rval;
5041 mbx_cmd_t mc;
5042 mbx_cmd_t *mcp = &mc;
5044 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5045 "Entered %s.\n", __func__);
5047 if (!IS_FWI2_CAPABLE(vha->hw))
5048 return QLA_FUNCTION_FAILED;
5050 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5051 mcp->mb[1] = LSW(risc_addr);
5052 mcp->mb[2] = LSW(data);
5053 mcp->mb[3] = MSW(data);
5054 mcp->mb[8] = MSW(risc_addr);
5055 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5056 mcp->in_mb = MBX_0;
5057 mcp->tov = 30;
5058 mcp->flags = 0;
5059 rval = qla2x00_mailbox_command(vha, mcp);
5060 if (rval != QLA_SUCCESS) {
5061 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5062 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5063 } else {
5064 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5065 "Done %s.\n", __func__);
5068 return rval;
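/*
 * Editor's note (not in the original source): unlike the other routines in
 * this file, qla81xx_write_mpi_register() drives the mailbox registers
 * directly instead of going through qla2x00_mailbox_command().  It loads
 * MBC_WRITE_MPI_REGISTER and mb[0..3] into mailbox registers 0-4, sets the
 * host interrupt, and then polls host_status (6,000,000 iterations with a
 * 5 us delay, roughly 30 seconds) for a mailbox completion before reading
 * the result from mailbox 0.
 */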
5072 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5074 int rval;
5075 uint32_t stat, timer;
5076 uint16_t mb0 = 0;
5077 struct qla_hw_data *ha = vha->hw;
5078 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5080 rval = QLA_SUCCESS;
5082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5083 "Entered %s.\n", __func__);
5085 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5087 /* Write the MBC data to the registers */
5088 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5089 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5090 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5091 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5092 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5094 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5096 /* Poll for MBC interrupt */
5097 for (timer = 6000000; timer; timer--) {
5098 /* Check for pending interrupts. */
5099 stat = RD_REG_DWORD(&reg->host_status);
5100 if (stat & HSRX_RISC_INT) {
5101 stat &= 0xff;
5103 if (stat == 0x1 || stat == 0x2 ||
5104 stat == 0x10 || stat == 0x11) {
5105 set_bit(MBX_INTERRUPT,
5106 &ha->mbx_cmd_flags);
5107 mb0 = RD_REG_WORD(&reg->mailbox0);
5108 WRT_REG_DWORD(&reg->hccr,
5109 HCCRX_CLR_RISC_INT);
5110 RD_REG_DWORD(&reg->hccr);
5111 break;
5114 udelay(5);
5117 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5118 rval = mb0 & MBS_MASK;
5119 else
5120 rval = QLA_FUNCTION_FAILED;
5122 if (rval != QLA_SUCCESS) {
5123 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5124 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5125 } else {
5126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5127 "Done %s.\n", __func__);
5130 return rval;
5134 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5136 int rval;
5137 mbx_cmd_t mc;
5138 mbx_cmd_t *mcp = &mc;
5139 struct qla_hw_data *ha = vha->hw;
5141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5142 "Entered %s.\n", __func__);
5144 if (!IS_FWI2_CAPABLE(ha))
5145 return QLA_FUNCTION_FAILED;
5147 mcp->mb[0] = MBC_DATA_RATE;
5148 mcp->mb[1] = 0;
5149 mcp->out_mb = MBX_1|MBX_0;
5150 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5151 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5152 mcp->in_mb |= MBX_3;
5153 mcp->tov = MBX_TOV_SECONDS;
5154 mcp->flags = 0;
5155 rval = qla2x00_mailbox_command(vha, mcp);
5156 if (rval != QLA_SUCCESS) {
5157 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5158 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5159 } else {
5160 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5161 "Done %s.\n", __func__);
5162 if (mcp->mb[1] != 0x7)
5163 ha->link_data_rate = mcp->mb[1];
5166 return rval;
5170 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5172 int rval;
5173 mbx_cmd_t mc;
5174 mbx_cmd_t *mcp = &mc;
5175 struct qla_hw_data *ha = vha->hw;
5177 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5178 "Entered %s.\n", __func__);
5180 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5181 !IS_QLA27XX(ha))
5182 return QLA_FUNCTION_FAILED;
5183 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5184 mcp->out_mb = MBX_0;
5185 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5186 mcp->tov = MBX_TOV_SECONDS;
5187 mcp->flags = 0;
5189 rval = qla2x00_mailbox_command(vha, mcp);
5191 if (rval != QLA_SUCCESS) {
5192 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5193 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5194 } else {
5195 /* Copy all bits to preserve original value */
5196 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5198 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5199 "Done %s.\n", __func__);
5201 return rval;
5205 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5207 int rval;
5208 mbx_cmd_t mc;
5209 mbx_cmd_t *mcp = &mc;
5211 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5212 "Entered %s.\n", __func__);
5214 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5215 /* Copy all bits to preserve original setting */
5216 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5217 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5218 mcp->in_mb = MBX_0;
5219 mcp->tov = MBX_TOV_SECONDS;
5220 mcp->flags = 0;
5221 rval = qla2x00_mailbox_command(vha, mcp);
5223 if (rval != QLA_SUCCESS) {
5224 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5225 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5226 } else
5227 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5228 "Done %s.\n", __func__);
5230 return rval;
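/*
 * Editor's note (illustrative, not part of the original source):
 * qla24xx_set_fcp_prio() issues MBC_PORT_PARAMS for a single loop id,
 * enabling (BIT_1) or disabling (BIT_2) FCP priority based on
 * ha->flags.fcp_prio_enabled and passing the 4-bit priority in mb[4].
 * A hypothetical caller that ignores the returned mailbox values:
 *
 *	rval = qla24xx_set_fcp_prio(vha, fcport->loop_id, prio, NULL);
 */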
5235 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5236 uint16_t *mb)
5238 int rval;
5239 mbx_cmd_t mc;
5240 mbx_cmd_t *mcp = &mc;
5241 struct qla_hw_data *ha = vha->hw;
5243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5244 "Entered %s.\n", __func__);
5246 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5247 return QLA_FUNCTION_FAILED;
5249 mcp->mb[0] = MBC_PORT_PARAMS;
5250 mcp->mb[1] = loop_id;
5251 if (ha->flags.fcp_prio_enabled)
5252 mcp->mb[2] = BIT_1;
5253 else
5254 mcp->mb[2] = BIT_2;
5255 mcp->mb[4] = priority & 0xf;
5256 mcp->mb[9] = vha->vp_idx;
5257 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5258 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5259 mcp->tov = 30;
5260 mcp->flags = 0;
5261 rval = qla2x00_mailbox_command(vha, mcp);
5262 if (mb != NULL) {
5263 mb[0] = mcp->mb[0];
5264 mb[1] = mcp->mb[1];
5265 mb[3] = mcp->mb[3];
5266 mb[4] = mcp->mb[4];
5269 if (rval != QLA_SUCCESS) {
5270 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5271 } else {
5272 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5273 "Done %s.\n", __func__);
5276 return rval;
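/*
 * Editor's note (not in the original source): qla2x00_get_thermal_temp()
 * picks a temperature source per adapter type: it is unsupported on
 * pre-FWI2, ISP24xx and ISP81xx parts; two known ISP25xx subsystem IDs
 * read the sensor over the SFP interface; P3P parts (ISP82xx/8044) read a
 * hardware register; everything else uses the RNID_TYPE_ASIC_TEMP mailbox
 * via qla2x00_read_asic_temperature().
 */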
5280 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5282 int rval = QLA_FUNCTION_FAILED;
5283 struct qla_hw_data *ha = vha->hw;
5284 uint8_t byte;
5286 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5287 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5288 "Thermal not supported by this card.\n");
5289 return rval;
5292 if (IS_QLA25XX(ha)) {
5293 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5294 ha->pdev->subsystem_device == 0x0175) {
5295 rval = qla2x00_read_sfp(vha, 0, &byte,
5296 0x98, 0x1, 1, BIT_13|BIT_0);
5297 *temp = byte;
5298 return rval;
5300 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5301 ha->pdev->subsystem_device == 0x338e) {
5302 rval = qla2x00_read_sfp(vha, 0, &byte,
5303 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5304 *temp = byte;
5305 return rval;
5307 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5308 "Thermal not supported by this card.\n");
5309 return rval;
5312 if (IS_QLA82XX(ha)) {
5313 *temp = qla82xx_read_temperature(vha);
5314 rval = QLA_SUCCESS;
5315 return rval;
5316 } else if (IS_QLA8044(ha)) {
5317 *temp = qla8044_read_temperature(vha);
5318 rval = QLA_SUCCESS;
5319 return rval;
5322 rval = qla2x00_read_asic_temperature(vha, temp);
5323 return rval;
5327 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5329 int rval;
5330 struct qla_hw_data *ha = vha->hw;
5331 mbx_cmd_t mc;
5332 mbx_cmd_t *mcp = &mc;
5334 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5335 "Entered %s.\n", __func__);
5337 if (!IS_FWI2_CAPABLE(ha))
5338 return QLA_FUNCTION_FAILED;
5340 memset(mcp, 0, sizeof(mbx_cmd_t));
5341 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5342 mcp->mb[1] = 1;
5344 mcp->out_mb = MBX_1|MBX_0;
5345 mcp->in_mb = MBX_0;
5346 mcp->tov = 30;
5347 mcp->flags = 0;
5349 rval = qla2x00_mailbox_command(vha, mcp);
5350 if (rval != QLA_SUCCESS) {
5351 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5352 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5353 } else {
5354 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5355 "Done %s.\n", __func__);
5358 return rval;
5362 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5364 int rval;
5365 struct qla_hw_data *ha = vha->hw;
5366 mbx_cmd_t mc;
5367 mbx_cmd_t *mcp = &mc;
5369 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5370 "Entered %s.\n", __func__);
5372 if (!IS_P3P_TYPE(ha))
5373 return QLA_FUNCTION_FAILED;
5375 memset(mcp, 0, sizeof(mbx_cmd_t));
5376 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5377 mcp->mb[1] = 0;
5379 mcp->out_mb = MBX_1|MBX_0;
5380 mcp->in_mb = MBX_0;
5381 mcp->tov = 30;
5382 mcp->flags = 0;
5384 rval = qla2x00_mailbox_command(vha, mcp);
5385 if (rval != QLA_SUCCESS) {
5386 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5387 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5388 } else {
5389 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5390 "Done %s.\n", __func__);
5393 return rval;
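/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the minidump template is fetched in two steps.  First RQST_TMPLT_SIZE
 * returns the template size in mb[2]/mb[3]; the driver then allocates a
 * coherent buffer and issues RQST_TMPLT to DMA the template into it (the
 * ISP8044 variant below fetches it in MINIDUMP_SIZE_36K chunks):
 *
 *	if (qla82xx_md_get_template_size(vha) == QLA_SUCCESS)
 *		rval = qla82xx_md_get_template(vha);
 */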
5397 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5399 struct qla_hw_data *ha = vha->hw;
5400 mbx_cmd_t mc;
5401 mbx_cmd_t *mcp = &mc;
5402 int rval = QLA_FUNCTION_FAILED;
5404 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5405 "Entered %s.\n", __func__);
5407 memset(mcp->mb, 0, sizeof(mcp->mb));
5408 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5409 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5410 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5411 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5413 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5414 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5415 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5417 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5418 mcp->tov = MBX_TOV_SECONDS;
5419 rval = qla2x00_mailbox_command(vha, mcp);
5421 /* Always copy back return mailbox values. */
5422 if (rval != QLA_SUCCESS) {
5423 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5424 "mailbox command FAILED=0x%x, subcode=%x.\n",
5425 (mcp->mb[1] << 16) | mcp->mb[0],
5426 (mcp->mb[3] << 16) | mcp->mb[2]);
5427 } else {
5428 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5429 "Done %s.\n", __func__);
5430 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5431 if (!ha->md_template_size) {
5432 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5433 "Null template size obtained.\n");
5434 rval = QLA_FUNCTION_FAILED;
5437 return rval;
5441 qla82xx_md_get_template(scsi_qla_host_t *vha)
5443 struct qla_hw_data *ha = vha->hw;
5444 mbx_cmd_t mc;
5445 mbx_cmd_t *mcp = &mc;
5446 int rval = QLA_FUNCTION_FAILED;
5448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5449 "Entered %s.\n", __func__);
5451 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5452 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5453 if (!ha->md_tmplt_hdr) {
5454 ql_log(ql_log_warn, vha, 0x1124,
5455 "Unable to allocate memory for Minidump template.\n");
5456 return rval;
5459 memset(mcp->mb, 0, sizeof(mcp->mb));
5460 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5461 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5462 mcp->mb[2] = LSW(RQST_TMPLT);
5463 mcp->mb[3] = MSW(RQST_TMPLT);
5464 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5465 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5466 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5467 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5468 mcp->mb[8] = LSW(ha->md_template_size);
5469 mcp->mb[9] = MSW(ha->md_template_size);
5471 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5472 mcp->tov = MBX_TOV_SECONDS;
5473 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5474 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5475 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5476 rval = qla2x00_mailbox_command(vha, mcp);
5478 if (rval != QLA_SUCCESS) {
5479 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5480 "mailbox command FAILED=0x%x, subcode=%x.\n",
5481 ((mcp->mb[1] << 16) | mcp->mb[0]),
5482 ((mcp->mb[3] << 16) | mcp->mb[2]));
5483 } else
5484 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5485 "Done %s.\n", __func__);
5486 return rval;
5490 qla8044_md_get_template(scsi_qla_host_t *vha)
5492 struct qla_hw_data *ha = vha->hw;
5493 mbx_cmd_t mc;
5494 mbx_cmd_t *mcp = &mc;
5495 int rval = QLA_FUNCTION_FAILED;
5496 int offset = 0, size = MINIDUMP_SIZE_36K;
5497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5498 "Entered %s.\n", __func__);
5500 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5501 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5502 if (!ha->md_tmplt_hdr) {
5503 ql_log(ql_log_warn, vha, 0xb11b,
5504 "Unable to allocate memory for Minidump template.\n");
5505 return rval;
5508 memset(mcp->mb, 0, sizeof(mcp->mb));
5509 while (offset < ha->md_template_size) {
5510 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5511 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5512 mcp->mb[2] = LSW(RQST_TMPLT);
5513 mcp->mb[3] = MSW(RQST_TMPLT);
5514 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5515 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5516 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5517 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5518 mcp->mb[8] = LSW(size);
5519 mcp->mb[9] = MSW(size);
5520 mcp->mb[10] = LSW(offset);
5521 mcp->mb[11] = MSW(offset);
5522 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5523 mcp->tov = MBX_TOV_SECONDS;
5524 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5525 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5526 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5527 rval = qla2x00_mailbox_command(vha, mcp);
5529 if (rval != QLA_SUCCESS) {
5530 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5531 "mailbox command FAILED=0x%x, subcode=%x.\n",
5532 ((mcp->mb[1] << 16) | mcp->mb[0]),
5533 ((mcp->mb[3] << 16) | mcp->mb[2]));
5534 return rval;
5535 } else
5536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5537 "Done %s.\n", __func__);
5538 offset = offset + size;
5540 return rval;
5544 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5546 int rval;
5547 struct qla_hw_data *ha = vha->hw;
5548 mbx_cmd_t mc;
5549 mbx_cmd_t *mcp = &mc;
5551 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5552 return QLA_FUNCTION_FAILED;
5554 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5555 "Entered %s.\n", __func__);
5557 memset(mcp, 0, sizeof(mbx_cmd_t));
5558 mcp->mb[0] = MBC_SET_LED_CONFIG;
5559 mcp->mb[1] = led_cfg[0];
5560 mcp->mb[2] = led_cfg[1];
5561 if (IS_QLA8031(ha)) {
5562 mcp->mb[3] = led_cfg[2];
5563 mcp->mb[4] = led_cfg[3];
5564 mcp->mb[5] = led_cfg[4];
5565 mcp->mb[6] = led_cfg[5];
5568 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5569 if (IS_QLA8031(ha))
5570 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5571 mcp->in_mb = MBX_0;
5572 mcp->tov = 30;
5573 mcp->flags = 0;
5575 rval = qla2x00_mailbox_command(vha, mcp);
5576 if (rval != QLA_SUCCESS) {
5577 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5578 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5579 } else {
5580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5581 "Done %s.\n", __func__);
5584 return rval;
5588 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5590 int rval;
5591 struct qla_hw_data *ha = vha->hw;
5592 mbx_cmd_t mc;
5593 mbx_cmd_t *mcp = &mc;
5595 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5596 return QLA_FUNCTION_FAILED;
5598 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5599 "Entered %s.\n", __func__);
5601 memset(mcp, 0, sizeof(mbx_cmd_t));
5602 mcp->mb[0] = MBC_GET_LED_CONFIG;
5604 mcp->out_mb = MBX_0;
5605 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5606 if (IS_QLA8031(ha))
5607 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5608 mcp->tov = 30;
5609 mcp->flags = 0;
5611 rval = qla2x00_mailbox_command(vha, mcp);
5612 if (rval != QLA_SUCCESS) {
5613 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5614 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5615 } else {
5616 led_cfg[0] = mcp->mb[1];
5617 led_cfg[1] = mcp->mb[2];
5618 if (IS_QLA8031(ha)) {
5619 led_cfg[2] = mcp->mb[3];
5620 led_cfg[3] = mcp->mb[4];
5621 led_cfg[4] = mcp->mb[5];
5622 led_cfg[5] = mcp->mb[6];
5624 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5625 "Done %s.\n", __func__);
5628 return rval;
5632 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5634 int rval;
5635 struct qla_hw_data *ha = vha->hw;
5636 mbx_cmd_t mc;
5637 mbx_cmd_t *mcp = &mc;
5639 if (!IS_P3P_TYPE(ha))
5640 return QLA_FUNCTION_FAILED;
5642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5643 "Entered %s.\n", __func__);
5645 memset(mcp, 0, sizeof(mbx_cmd_t));
5646 mcp->mb[0] = MBC_SET_LED_CONFIG;
5647 if (enable)
5648 mcp->mb[7] = 0xE;
5649 else
5650 mcp->mb[7] = 0xD;
5652 mcp->out_mb = MBX_7|MBX_0;
5653 mcp->in_mb = MBX_0;
5654 mcp->tov = MBX_TOV_SECONDS;
5655 mcp->flags = 0;
5657 rval = qla2x00_mailbox_command(vha, mcp);
5658 if (rval != QLA_SUCCESS) {
5659 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5660 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5661 } else {
5662 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5663 "Done %s.\n", __func__);
5666 return rval;
5670 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5672 int rval;
5673 struct qla_hw_data *ha = vha->hw;
5674 mbx_cmd_t mc;
5675 mbx_cmd_t *mcp = &mc;
5677 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5678 return QLA_FUNCTION_FAILED;
5680 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5681 "Entered %s.\n", __func__);
5683 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5684 mcp->mb[1] = LSW(reg);
5685 mcp->mb[2] = MSW(reg);
5686 mcp->mb[3] = LSW(data);
5687 mcp->mb[4] = MSW(data);
5688 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5690 mcp->in_mb = MBX_1|MBX_0;
5691 mcp->tov = MBX_TOV_SECONDS;
5692 mcp->flags = 0;
5693 rval = qla2x00_mailbox_command(vha, mcp);
5695 if (rval != QLA_SUCCESS) {
5696 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5697 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5698 } else {
5699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5700 "Done %s.\n", __func__);
5703 return rval;
5707 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5709 int rval;
5710 struct qla_hw_data *ha = vha->hw;
5711 mbx_cmd_t mc;
5712 mbx_cmd_t *mcp = &mc;
5714 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5716 "Implicit LOGO Unsupported.\n");
5717 return QLA_FUNCTION_FAILED;
5721 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5722 "Entering %s.\n", __func__);
5724 /* Perform Implicit LOGO. */
5725 mcp->mb[0] = MBC_PORT_LOGOUT;
5726 mcp->mb[1] = fcport->loop_id;
5727 mcp->mb[10] = BIT_15;
5728 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5729 mcp->in_mb = MBX_0;
5730 mcp->tov = MBX_TOV_SECONDS;
5731 mcp->flags = 0;
5732 rval = qla2x00_mailbox_command(vha, mcp);
5733 if (rval != QLA_SUCCESS)
5734 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5735 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5736 else
5737 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5738 "Done %s.\n", __func__);
5740 return rval;
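/*
 * Editor's note (not in the original source): qla83xx_rd_reg() reads a
 * remote register with MBC_READ_REMOTE_REG and assembles the value from
 * mb[3] (low word) and mb[4] (high word).  If CAMRAM returns the
 * 0xbad0bad0 poison value during a soft reset, the read is retried for up
 * to 2 seconds.  Illustrative use:
 *
 *	uint32_t val;
 *
 *	if (qla83xx_rd_reg(vha, reg_addr, &val) != QLA_SUCCESS)
 *		return QLA_FUNCTION_FAILED;
 */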
5744 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5746 int rval;
5747 mbx_cmd_t mc;
5748 mbx_cmd_t *mcp = &mc;
5749 struct qla_hw_data *ha = vha->hw;
5750 unsigned long retry_max_time = jiffies + (2 * HZ);
5752 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5753 return QLA_FUNCTION_FAILED;
5755 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5757 retry_rd_reg:
5758 mcp->mb[0] = MBC_READ_REMOTE_REG;
5759 mcp->mb[1] = LSW(reg);
5760 mcp->mb[2] = MSW(reg);
5761 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5762 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5763 mcp->tov = MBX_TOV_SECONDS;
5764 mcp->flags = 0;
5765 rval = qla2x00_mailbox_command(vha, mcp);
5767 if (rval != QLA_SUCCESS) {
5768 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5769 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5770 rval, mcp->mb[0], mcp->mb[1]);
5771 } else {
5772 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5773 if (*data == QLA8XXX_BAD_VALUE) {
5774 /*
5775 * During a soft reset, CAMRAM register reads might return
5776 * 0xbad0bad0, so retry for a maximum of 2 seconds while
5777 * reading CAMRAM registers.
5778 */
5779 if (time_after(jiffies, retry_max_time)) {
5780 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5781 "Failure to read CAMRAM register. "
5782 "data=0x%x.\n", *data);
5783 return QLA_FUNCTION_FAILED;
5785 msleep(100);
5786 goto retry_rd_reg;
5788 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5791 return rval;
5795 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5797 int rval;
5798 mbx_cmd_t mc;
5799 mbx_cmd_t *mcp = &mc;
5800 struct qla_hw_data *ha = vha->hw;
5802 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5803 return QLA_FUNCTION_FAILED;
5805 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5807 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5808 mcp->out_mb = MBX_0;
5809 mcp->in_mb = MBX_1|MBX_0;
5810 mcp->tov = MBX_TOV_SECONDS;
5811 mcp->flags = 0;
5812 rval = qla2x00_mailbox_command(vha, mcp);
5814 if (rval != QLA_SUCCESS) {
5815 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5816 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5817 rval, mcp->mb[0], mcp->mb[1]);
5818 ha->isp_ops->fw_dump(vha, 0);
5819 } else {
5820 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5823 return rval;
5827 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5828 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5830 int rval;
5831 mbx_cmd_t mc;
5832 mbx_cmd_t *mcp = &mc;
5833 uint8_t subcode = (uint8_t)options;
5834 struct qla_hw_data *ha = vha->hw;
5836 if (!IS_QLA8031(ha))
5837 return QLA_FUNCTION_FAILED;
5839 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5841 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5842 mcp->mb[1] = options;
5843 mcp->out_mb = MBX_1|MBX_0;
5844 if (subcode & BIT_2) {
5845 mcp->mb[2] = LSW(start_addr);
5846 mcp->mb[3] = MSW(start_addr);
5847 mcp->mb[4] = LSW(end_addr);
5848 mcp->mb[5] = MSW(end_addr);
5849 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5851 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5852 if (!(subcode & (BIT_2 | BIT_5)))
5853 mcp->in_mb |= MBX_4|MBX_3;
5854 mcp->tov = MBX_TOV_SECONDS;
5855 mcp->flags = 0;
5856 rval = qla2x00_mailbox_command(vha, mcp);
5858 if (rval != QLA_SUCCESS) {
5859 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5860 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5861 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5862 mcp->mb[4]);
5863 ha->isp_ops->fw_dump(vha, 0);
5864 } else {
5865 if (subcode & BIT_5)
5866 *sector_size = mcp->mb[1];
5867 else if (subcode & (BIT_6 | BIT_7)) {
5868 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5869 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5870 } else if (subcode & (BIT_3 | BIT_4)) {
5871 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5872 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5874 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5877 return rval;
5881 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5882 uint32_t size)
5884 int rval;
5885 mbx_cmd_t mc;
5886 mbx_cmd_t *mcp = &mc;
5888 if (!IS_MCTP_CAPABLE(vha->hw))
5889 return QLA_FUNCTION_FAILED;
5891 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5892 "Entered %s.\n", __func__);
5894 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5895 mcp->mb[1] = LSW(addr);
5896 mcp->mb[2] = MSW(req_dma);
5897 mcp->mb[3] = LSW(req_dma);
5898 mcp->mb[4] = MSW(size);
5899 mcp->mb[5] = LSW(size);
5900 mcp->mb[6] = MSW(MSD(req_dma));
5901 mcp->mb[7] = LSW(MSD(req_dma));
5902 mcp->mb[8] = MSW(addr);
5903 /* Mark the RAM ID as valid; mc is on the stack and not zeroed, so assign. */
5904 mcp->mb[10] = BIT_7;
5905 /* For MCTP, the RAM ID is 0x40. */
5906 mcp->mb[10] |= 0x40;
5908 mcp->out_mb = MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5909 MBX_0;
5911 mcp->in_mb = MBX_0;
5912 mcp->tov = MBX_TOV_SECONDS;
5913 mcp->flags = 0;
5914 rval = qla2x00_mailbox_command(vha, mcp);
5916 if (rval != QLA_SUCCESS) {
5917 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5918 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5919 } else {
5920 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5921 "Done %s.\n", __func__);
5924 return rval;
5928 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
5929 void *dd_buf, uint size, uint options)
5931 int rval;
5932 mbx_cmd_t mc;
5933 mbx_cmd_t *mcp = &mc;
5934 dma_addr_t dd_dma;
5936 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
5937 return QLA_FUNCTION_FAILED;
5939 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
5940 "Entered %s.\n", __func__);
5942 dd_dma = dma_map_single(&vha->hw->pdev->dev,
5943 dd_buf, size, DMA_FROM_DEVICE);
5944 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
5945 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
5946 return QLA_MEMORY_ALLOC_FAILED;
5949 memset(dd_buf, 0, size);
5951 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
5952 mcp->mb[1] = options;
5953 mcp->mb[2] = MSW(LSD(dd_dma));
5954 mcp->mb[3] = LSW(LSD(dd_dma));
5955 mcp->mb[6] = MSW(MSD(dd_dma));
5956 mcp->mb[7] = LSW(MSD(dd_dma));
5957 mcp->mb[8] = size;
5958 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5959 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5960 mcp->buf_size = size;
5961 mcp->flags = MBX_DMA_IN;
5962 mcp->tov = MBX_TOV_SECONDS * 4;
5963 rval = qla2x00_mailbox_command(vha, mcp);
5965 if (rval != QLA_SUCCESS) {
5966 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
5967 } else {
5968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
5969 "Done %s.\n", __func__);
5972 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
5973 size, DMA_FROM_DEVICE);
5975 return rval;
5978 static void qla2x00_async_mb_sp_done(void *s, int res)
5980 struct srb *sp = s;
5982 sp->u.iocb_cmd.u.mbx.rc = res;
5984 complete(&sp->u.iocb_cmd.u.mbx.comp);
5985 /* don't free sp here. Let the caller do the free */
5989 * This mailbox routine uses the IOCB interface to send a MB command.
5990 * This allows non-critical (non chip-setup) commands to go
5991 * out in parallel.
5993 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
5995 int rval = QLA_FUNCTION_FAILED;
5996 srb_t *sp;
5997 struct srb_iocb *c;
5999 if (!vha->hw->flags.fw_started)
6000 goto done;
6002 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6003 if (!sp)
6004 goto done;
6006 sp->type = SRB_MB_IOCB;
6007 sp->name = mb_to_str(mcp->mb[0]);
6009 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6011 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6013 c = &sp->u.iocb_cmd;
6014 c->timeout = qla2x00_async_iocb_timeout;
6015 init_completion(&c->u.mbx.comp);
6017 sp->done = qla2x00_async_mb_sp_done;
6019 rval = qla2x00_start_sp(sp);
6020 if (rval != QLA_SUCCESS) {
6021 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6022 "%s: %s Failed submission. %x.\n",
6023 __func__, sp->name, rval);
6024 goto done_free_sp;
6027 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6028 sp->name, sp->handle);
6030 wait_for_completion(&c->u.mbx.comp);
6031 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6033 rval = c->u.mbx.rc;
6034 switch (rval) {
6035 case QLA_FUNCTION_TIMEOUT:
6036 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6037 __func__, sp->name, rval);
6038 break;
6039 case QLA_SUCCESS:
6040 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6041 __func__, sp->name);
6042 sp->free(sp);
6043 break;
6044 default:
6045 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6046 __func__, sp->name, rval);
6047 sp->free(sp);
6048 break;
6051 return rval;
6053 done_free_sp:
6054 sp->free(sp);
6055 done:
6056 return rval;
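/*
 * Editor's note (illustrative, not part of the original source): callers of
 * qla24xx_send_mb_cmd() build an mbx_cmd_t on the stack, zero it, fill in
 * mb[], and read the results back from mc.mb[] on success; because the
 * routine blocks on a completion it must not be called from the DPC thread
 * (see the notes on the wrappers below).  A minimal sketch, mirroring
 * qla24xx_res_count_wait() at the end of this file:
 *
 *	mbx_cmd_t mc;
 *
 *	memset(&mc, 0, sizeof(mc));
 *	mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
 *	rval = qla24xx_send_mb_cmd(vha, &mc);
 */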
6060 * qla24xx_gpdb_wait
6061 * NOTE: Do not call this routine from DPC thread
6063 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6065 int rval = QLA_FUNCTION_FAILED;
6066 dma_addr_t pd_dma;
6067 struct port_database_24xx *pd;
6068 struct qla_hw_data *ha = vha->hw;
6069 mbx_cmd_t mc;
6071 if (!vha->hw->flags.fw_started)
6072 goto done;
6074 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6075 if (pd == NULL) {
6076 ql_log(ql_log_warn, vha, 0xd047,
6077 "Failed to allocate port database structure.\n");
6078 goto done_free_sp;
6081 memset(&mc, 0, sizeof(mc));
6082 mc.mb[0] = MBC_GET_PORT_DATABASE;
6083 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6084 mc.mb[2] = MSW(pd_dma);
6085 mc.mb[3] = LSW(pd_dma);
6086 mc.mb[6] = MSW(MSD(pd_dma));
6087 mc.mb[7] = LSW(MSD(pd_dma));
6088 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6089 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6091 rval = qla24xx_send_mb_cmd(vha, &mc);
6092 if (rval != QLA_SUCCESS) {
6093 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6094 "%s: %8phC fail\n", __func__, fcport->port_name);
6095 goto done_free_sp;
6098 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6100 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6101 __func__, fcport->port_name);
6103 done_free_sp:
6104 if (pd)
6105 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6106 done:
6107 return rval;
6110 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6111 struct port_database_24xx *pd)
6113 int rval = QLA_SUCCESS;
6114 uint64_t zero = 0;
6115 u8 current_login_state, last_login_state;
6117 if (fcport->fc4f_nvme) {
6118 current_login_state = pd->current_login_state >> 4;
6119 last_login_state = pd->last_login_state >> 4;
6120 } else {
6121 current_login_state = pd->current_login_state & 0xf;
6122 last_login_state = pd->last_login_state & 0xf;
6125 /* Check for logged in state. */
6126 if (current_login_state != PDS_PRLI_COMPLETE) {
6127 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6128 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6129 current_login_state, last_login_state, fcport->loop_id);
6130 rval = QLA_FUNCTION_FAILED;
6131 goto gpd_error_out;
6134 if (fcport->loop_id == FC_NO_LOOP_ID ||
6135 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6136 memcmp(fcport->port_name, pd->port_name, 8))) {
6137 /* We lost the device mid way. */
6138 rval = QLA_NOT_LOGGED_IN;
6139 goto gpd_error_out;
6142 /* Names are little-endian. */
6143 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6144 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6146 /* Get port_id of device. */
6147 fcport->d_id.b.domain = pd->port_id[0];
6148 fcport->d_id.b.area = pd->port_id[1];
6149 fcport->d_id.b.al_pa = pd->port_id[2];
6150 fcport->d_id.b.rsvd_1 = 0;
6152 if (fcport->fc4f_nvme) {
6153 fcport->nvme_prli_service_param =
6154 pd->prli_nvme_svc_param_word_3;
6155 fcport->port_type = FCT_NVME;
6156 } else {
6157 /* If not target must be initiator or unknown type. */
6158 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6159 fcport->port_type = FCT_INITIATOR;
6160 else
6161 fcport->port_type = FCT_TARGET;
6163 /* Passback COS information. */
6164 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6165 FC_COS_CLASS2 : FC_COS_CLASS3;
6167 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6168 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6169 fcport->conf_compl_supported = 1;
6172 gpd_error_out:
6173 return rval;
6177 * qla24xx_gidlist_wait
6178 * NOTE: don't call this routine from DPC thread.
6180 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6181 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6183 int rval = QLA_FUNCTION_FAILED;
6184 mbx_cmd_t mc;
6186 if (!vha->hw->flags.fw_started)
6187 goto done;
6189 memset(&mc, 0, sizeof(mc));
6190 mc.mb[0] = MBC_GET_ID_LIST;
6191 mc.mb[2] = MSW(id_list_dma);
6192 mc.mb[3] = LSW(id_list_dma);
6193 mc.mb[6] = MSW(MSD(id_list_dma));
6194 mc.mb[7] = LSW(MSD(id_list_dma));
6195 mc.mb[8] = 0;
6196 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6198 rval = qla24xx_send_mb_cmd(vha, &mc);
6199 if (rval != QLA_SUCCESS) {
6200 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6201 "%s: fail\n", __func__);
6202 } else {
6203 *entries = mc.mb[1];
6204 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6205 "%s: done\n", __func__);
6207 done:
6208 return rval;
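/*
 * Editor's note (illustrative, not part of the original source): the two
 * routines below share MBC_GET_SET_ZIO_THRESHOLD; mb[1] selects set (1) or
 * get (0) and the threshold value travels in mb[2].  A hypothetical
 * read-modify-write of the threshold:
 *
 *	uint16_t zio;
 *
 *	if (qla27xx_get_zio_threshold(vha, &zio) == QLA_SUCCESS)
 *		rval = qla27xx_set_zio_threshold(vha, zio + 1);
 */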
6211 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6213 int rval;
6214 mbx_cmd_t mc;
6215 mbx_cmd_t *mcp = &mc;
6217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6218 "Entered %s\n", __func__);
6220 memset(mcp->mb, 0, sizeof(mcp->mb));
6221 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6222 mcp->mb[1] = cpu_to_le16(1);
6223 mcp->mb[2] = cpu_to_le16(value);
6224 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6225 mcp->in_mb = MBX_2 | MBX_0;
6226 mcp->tov = MBX_TOV_SECONDS;
6227 mcp->flags = 0;
6229 rval = qla2x00_mailbox_command(vha, mcp);
6231 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6232 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6234 return rval;
6237 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6239 int rval;
6240 mbx_cmd_t mc;
6241 mbx_cmd_t *mcp = &mc;
6243 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6244 "Entered %s\n", __func__);
6246 memset(mcp->mb, 0, sizeof(mcp->mb));
6247 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6248 mcp->mb[1] = cpu_to_le16(0);
6249 mcp->out_mb = MBX_1 | MBX_0;
6250 mcp->in_mb = MBX_2 | MBX_0;
6251 mcp->tov = MBX_TOV_SECONDS;
6252 mcp->flags = 0;
6254 rval = qla2x00_mailbox_command(vha, mcp);
6255 if (rval == QLA_SUCCESS)
6256 *value = mc.mb[2];
6258 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6259 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6261 return rval;
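/*
 * Editor's note (not in the original source): qla2x00_read_sfp_dev() pulls
 * the whole SFP_DEV_SIZE transceiver image in SFP_BLOCK_SIZE chunks,
 * switching from I2C address 0xa0 to 0xa2 after the fourth block.  The
 * data accumulates in ha->sfp_data, and up to 'count' bytes are copied
 * into 'buf' when one is supplied.
 */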
6265 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6267 struct qla_hw_data *ha = vha->hw;
6268 uint16_t iter, addr, offset;
6269 dma_addr_t phys_addr;
6270 int rval, c;
6271 u8 *sfp_data;
6273 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6274 addr = 0xa0;
6275 phys_addr = ha->sfp_data_dma;
6276 sfp_data = ha->sfp_data;
6277 offset = c = 0;
6279 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6280 if (iter == 4) {
6281 /* Skip to next device address. */
6282 addr = 0xa2;
6283 offset = 0;
6286 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6287 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6288 if (rval != QLA_SUCCESS) {
6289 ql_log(ql_log_warn, vha, 0x706d,
6290 "Unable to read SFP data (%x/%x/%x).\n", rval,
6291 addr, offset);
6293 return rval;
6296 if (buf && (c < count)) {
6297 u16 sz;
6299 if ((count - c) >= SFP_BLOCK_SIZE)
6300 sz = SFP_BLOCK_SIZE;
6301 else
6302 sz = count - c;
6304 memcpy(buf, sfp_data, sz);
6305 buf += SFP_BLOCK_SIZE;
6306 c += sz;
6308 phys_addr += SFP_BLOCK_SIZE;
6309 sfp_data += SFP_BLOCK_SIZE;
6310 offset += SFP_BLOCK_SIZE;
6313 return rval;
6316 int qla24xx_res_count_wait(struct scsi_qla_host *vha,
6317 uint16_t *out_mb, int out_mb_sz)
6319 int rval = QLA_FUNCTION_FAILED;
6320 mbx_cmd_t mc;
6322 if (!vha->hw->flags.fw_started)
6323 goto done;
6325 memset(&mc, 0, sizeof(mc));
6326 mc.mb[0] = MBC_GET_RESOURCE_COUNTS;
6328 rval = qla24xx_send_mb_cmd(vha, &mc);
6329 if (rval != QLA_SUCCESS) {
6330 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6331 "%s: fail\n", __func__);
6332 } else {
6333 if (out_mb_sz <= SIZEOF_IOCB_MB_REG)
6334 memcpy(out_mb, mc.mb, out_mb_sz);
6335 else
6336 memcpy(out_mb, mc.mb, SIZEOF_IOCB_MB_REG);
6338 ql_dbg(ql_dbg_mbx, vha, 0xffff,
6339 "%s: done\n", __func__);
6341 done:
6342 return rval;