mei: me: add cannon point device ids
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_mbx.c
blobcb717d47339f6546be17a3066e00741936fd4cd3
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include "qla_target.h"
10 #include <linux/delay.h>
11 #include <linux/gfp.h>
13 static struct mb_cmd_name {
14 uint16_t cmd;
15 const char *str;
16 } mb_str[] = {
17 {MBC_GET_PORT_DATABASE, "GPDB"},
18 {MBC_GET_ID_LIST, "GIDList"},
19 {MBC_GET_LINK_PRIV_STATS, "Stats"},
22 static const char *mb_to_str(uint16_t cmd)
24 int i;
25 struct mb_cmd_name *e;
27 for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
28 e = mb_str + i;
29 if (cmd == e->cmd)
30 return e->str;
32 return "unknown";
35 static struct rom_cmd {
36 uint16_t cmd;
37 } rom_cmds[] = {
38 { MBC_LOAD_RAM },
39 { MBC_EXECUTE_FIRMWARE },
40 { MBC_READ_RAM_WORD },
41 { MBC_MAILBOX_REGISTER_TEST },
42 { MBC_VERIFY_CHECKSUM },
43 { MBC_GET_FIRMWARE_VERSION },
44 { MBC_LOAD_RISC_RAM },
45 { MBC_DUMP_RISC_RAM },
46 { MBC_LOAD_RISC_RAM_EXTENDED },
47 { MBC_DUMP_RISC_RAM_EXTENDED },
48 { MBC_WRITE_RAM_WORD_EXTENDED },
49 { MBC_READ_RAM_EXTENDED },
50 { MBC_GET_RESOURCE_COUNTS },
51 { MBC_SET_FIRMWARE_OPTION },
52 { MBC_MID_INITIALIZE_FIRMWARE },
53 { MBC_GET_FIRMWARE_STATE },
54 { MBC_GET_MEM_OFFLOAD_CNTRL_STAT },
55 { MBC_GET_RETRY_COUNT },
56 { MBC_TRACE_CONTROL },
57 { MBC_INITIALIZE_MULTIQ },
58 { MBC_IOCB_COMMAND_A64 },
59 { MBC_GET_ADAPTER_LOOP_ID },
60 { MBC_READ_SFP },
63 static int is_rom_cmd(uint16_t cmd)
65 int i;
66 struct rom_cmd *wc;
68 for (i = 0; i < ARRAY_SIZE(rom_cmds); i++) {
69 wc = rom_cmds + i;
70 if (wc->cmd == cmd)
71 return 1;
74 return 0;
78 * qla2x00_mailbox_command
79 * Issue mailbox command and waits for completion.
81 * Input:
82 * ha = adapter block pointer.
83 * mcp = driver internal mbx struct pointer.
85 * Output:
86 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
88 * Returns:
89 * 0 : QLA_SUCCESS = cmd performed success
90 * 1 : QLA_FUNCTION_FAILED (error encountered)
91 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
93 * Context:
94 * Kernel context.
96 static int
97 qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
99 int rval, i;
100 unsigned long flags = 0;
101 device_reg_t *reg;
102 uint8_t abort_active;
103 uint8_t io_lock_on;
104 uint16_t command = 0;
105 uint16_t *iptr;
106 uint16_t __iomem *optr;
107 uint32_t cnt;
108 uint32_t mboxes;
109 unsigned long wait_time;
110 struct qla_hw_data *ha = vha->hw;
111 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
114 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
116 if (ha->pdev->error_state > pci_channel_io_frozen) {
117 ql_log(ql_log_warn, vha, 0x1001,
118 "error_state is greater than pci_channel_io_frozen, "
119 "exiting.\n");
120 return QLA_FUNCTION_TIMEOUT;
123 if (vha->device_flags & DFLG_DEV_FAILED) {
124 ql_log(ql_log_warn, vha, 0x1002,
125 "Device in failed state, exiting.\n");
126 return QLA_FUNCTION_TIMEOUT;
129 /* if PCI error, then avoid mbx processing.*/
130 if (test_bit(PFLG_DISCONNECTED, &base_vha->dpc_flags) &&
131 test_bit(UNLOADING, &base_vha->dpc_flags)) {
132 ql_log(ql_log_warn, vha, 0xd04e,
133 "PCI error, exiting.\n");
134 return QLA_FUNCTION_TIMEOUT;
137 reg = ha->iobase;
138 io_lock_on = base_vha->flags.init_done;
140 rval = QLA_SUCCESS;
141 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
144 if (ha->flags.pci_channel_io_perm_failure) {
145 ql_log(ql_log_warn, vha, 0x1003,
146 "Perm failure on EEH timeout MBX, exiting.\n");
147 return QLA_FUNCTION_TIMEOUT;
150 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
151 /* Setting Link-Down error */
152 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
153 ql_log(ql_log_warn, vha, 0x1004,
154 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
155 return QLA_FUNCTION_TIMEOUT;
158 /* check if ISP abort is active and return cmd with timeout */
159 if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
160 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
161 test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) &&
162 !is_rom_cmd(mcp->mb[0])) {
163 ql_log(ql_log_info, vha, 0x1005,
164 "Cmd 0x%x aborted with timeout since ISP Abort is pending\n",
165 mcp->mb[0]);
166 return QLA_FUNCTION_TIMEOUT;
170 * Wait for active mailbox commands to finish by waiting at most tov
171 * seconds. This is to serialize actual issuing of mailbox cmds during
172 * non ISP abort time.
174 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
175 /* Timeout occurred. Return error. */
176 ql_log(ql_log_warn, vha, 0xd035,
177 "Cmd access timeout, cmd=0x%x, Exiting.\n",
178 mcp->mb[0]);
179 return QLA_FUNCTION_TIMEOUT;
182 ha->flags.mbox_busy = 1;
183 /* Save mailbox command for debug */
184 ha->mcp = mcp;
186 ql_dbg(ql_dbg_mbx, vha, 0x1006,
187 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
189 spin_lock_irqsave(&ha->hardware_lock, flags);
191 /* Load mailbox registers. */
192 if (IS_P3P_TYPE(ha))
193 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
194 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
195 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
196 else
197 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
199 iptr = mcp->mb;
200 command = mcp->mb[0];
201 mboxes = mcp->out_mb;
203 ql_dbg(ql_dbg_mbx, vha, 0x1111,
204 "Mailbox registers (OUT):\n");
205 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
206 if (IS_QLA2200(ha) && cnt == 8)
207 optr =
208 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
209 if (mboxes & BIT_0) {
210 ql_dbg(ql_dbg_mbx, vha, 0x1112,
211 "mbox[%d]<-0x%04x\n", cnt, *iptr);
212 WRT_REG_WORD(optr, *iptr);
215 mboxes >>= 1;
216 optr++;
217 iptr++;
220 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
221 "I/O Address = %p.\n", optr);
223 /* Issue set host interrupt command to send cmd out. */
224 ha->flags.mbox_int = 0;
225 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
227 /* Unlock mbx registers and wait for interrupt */
228 ql_dbg(ql_dbg_mbx, vha, 0x100f,
229 "Going to unlock irq & waiting for interrupts. "
230 "jiffies=%lx.\n", jiffies);
232 /* Wait for mbx cmd completion until timeout */
234 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
235 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
237 if (IS_P3P_TYPE(ha)) {
238 if (RD_REG_DWORD(&reg->isp82.hint) &
239 HINT_MBX_INT_PENDING) {
240 spin_unlock_irqrestore(&ha->hardware_lock,
241 flags);
242 ha->flags.mbox_busy = 0;
243 ql_dbg(ql_dbg_mbx, vha, 0x1010,
244 "Pending mailbox timeout, exiting.\n");
245 rval = QLA_FUNCTION_TIMEOUT;
246 goto premature_exit;
248 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
249 } else if (IS_FWI2_CAPABLE(ha))
250 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
251 else
252 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
253 spin_unlock_irqrestore(&ha->hardware_lock, flags);
255 wait_time = jiffies;
256 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
257 mcp->tov * HZ)) {
258 ql_dbg(ql_dbg_mbx, vha, 0x117a,
259 "cmd=%x Timeout.\n", command);
260 spin_lock_irqsave(&ha->hardware_lock, flags);
261 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
262 spin_unlock_irqrestore(&ha->hardware_lock, flags);
264 if (time_after(jiffies, wait_time + 5 * HZ))
265 ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n",
266 command, jiffies_to_msecs(jiffies - wait_time));
267 } else {
268 ql_dbg(ql_dbg_mbx, vha, 0x1011,
269 "Cmd=%x Polling Mode.\n", command);
271 if (IS_P3P_TYPE(ha)) {
272 if (RD_REG_DWORD(&reg->isp82.hint) &
273 HINT_MBX_INT_PENDING) {
274 spin_unlock_irqrestore(&ha->hardware_lock,
275 flags);
276 ha->flags.mbox_busy = 0;
277 ql_dbg(ql_dbg_mbx, vha, 0x1012,
278 "Pending mailbox timeout, exiting.\n");
279 rval = QLA_FUNCTION_TIMEOUT;
280 goto premature_exit;
282 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
283 } else if (IS_FWI2_CAPABLE(ha))
284 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
285 else
286 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
287 spin_unlock_irqrestore(&ha->hardware_lock, flags);
289 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
290 while (!ha->flags.mbox_int) {
291 if (time_after(jiffies, wait_time))
292 break;
294 /* Check for pending interrupts. */
295 qla2x00_poll(ha->rsp_q_map[0]);
297 if (!ha->flags.mbox_int &&
298 !(IS_QLA2200(ha) &&
299 command == MBC_LOAD_RISC_RAM_EXTENDED))
300 msleep(10);
301 } /* while */
302 ql_dbg(ql_dbg_mbx, vha, 0x1013,
303 "Waited %d sec.\n",
304 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
307 /* Check whether we timed out */
308 if (ha->flags.mbox_int) {
309 uint16_t *iptr2;
311 ql_dbg(ql_dbg_mbx, vha, 0x1014,
312 "Cmd=%x completed.\n", command);
314 /* Got interrupt. Clear the flag. */
315 ha->flags.mbox_int = 0;
316 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
318 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
319 ha->flags.mbox_busy = 0;
320 /* Setting Link-Down error */
321 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
322 ha->mcp = NULL;
323 rval = QLA_FUNCTION_FAILED;
324 ql_log(ql_log_warn, vha, 0xd048,
325 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
326 goto premature_exit;
329 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
330 rval = QLA_FUNCTION_FAILED;
332 /* Load return mailbox registers. */
333 iptr2 = mcp->mb;
334 iptr = (uint16_t *)&ha->mailbox_out[0];
335 mboxes = mcp->in_mb;
337 ql_dbg(ql_dbg_mbx, vha, 0x1113,
338 "Mailbox registers (IN):\n");
339 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
340 if (mboxes & BIT_0) {
341 *iptr2 = *iptr;
342 ql_dbg(ql_dbg_mbx, vha, 0x1114,
343 "mbox[%d]->0x%04x\n", cnt, *iptr2);
346 mboxes >>= 1;
347 iptr2++;
348 iptr++;
350 } else {
352 uint16_t mb[8];
353 uint32_t ictrl, host_status, hccr;
354 uint16_t w;
356 if (IS_FWI2_CAPABLE(ha)) {
357 mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
358 mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
359 mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
360 mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
361 mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
362 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
363 host_status = RD_REG_DWORD(&reg->isp24.host_status);
364 hccr = RD_REG_DWORD(&reg->isp24.hccr);
366 ql_log(ql_log_warn, vha, 0xd04c,
367 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
368 "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
369 command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
370 mb[7], host_status, hccr);
372 } else {
373 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
374 ictrl = RD_REG_WORD(&reg->isp.ictrl);
375 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
376 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
377 "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
379 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
381 /* Capture FW dump only, if PCI device active */
382 if (!pci_channel_offline(vha->hw->pdev)) {
383 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
384 if (w == 0xffff || ictrl == 0xffffffff) {
385 /* This is special case if there is unload
386 * of driver happening and if PCI device go
387 * into bad state due to PCI error condition
388 * then only PCI ERR flag would be set.
389 * we will do premature exit for above case.
391 ha->flags.mbox_busy = 0;
392 rval = QLA_FUNCTION_TIMEOUT;
393 goto premature_exit;
396 /* Attempt to capture firmware dump for further
397 * anallysis of the current formware state. we do not
398 * need to do this if we are intentionally generating
399 * a dump
401 if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR)
402 ha->isp_ops->fw_dump(vha, 0);
403 rval = QLA_FUNCTION_TIMEOUT;
407 ha->flags.mbox_busy = 0;
409 /* Clean up */
410 ha->mcp = NULL;
412 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
413 ql_dbg(ql_dbg_mbx, vha, 0x101a,
414 "Checking for additional resp interrupt.\n");
416 /* polling mode for non isp_abort commands. */
417 qla2x00_poll(ha->rsp_q_map[0]);
420 if (rval == QLA_FUNCTION_TIMEOUT &&
421 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
422 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
423 ha->flags.eeh_busy) {
424 /* not in dpc. schedule it for dpc to take over. */
425 ql_dbg(ql_dbg_mbx, vha, 0x101b,
426 "Timeout, schedule isp_abort_needed.\n");
428 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
429 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
430 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
431 if (IS_QLA82XX(ha)) {
432 ql_dbg(ql_dbg_mbx, vha, 0x112a,
433 "disabling pause transmit on port "
434 "0 & 1.\n");
435 qla82xx_wr_32(ha,
436 QLA82XX_CRB_NIU + 0x98,
437 CRB_NIU_XG_PAUSE_CTL_P0|
438 CRB_NIU_XG_PAUSE_CTL_P1);
440 ql_log(ql_log_info, base_vha, 0x101c,
441 "Mailbox cmd timeout occurred, cmd=0x%x, "
442 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
443 "abort.\n", command, mcp->mb[0],
444 ha->flags.eeh_busy);
445 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
446 qla2xxx_wake_dpc(vha);
448 } else if (!abort_active) {
449 /* call abort directly since we are in the DPC thread */
450 ql_dbg(ql_dbg_mbx, vha, 0x101d,
451 "Timeout, calling abort_isp.\n");
453 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
454 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
455 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
456 if (IS_QLA82XX(ha)) {
457 ql_dbg(ql_dbg_mbx, vha, 0x112b,
458 "disabling pause transmit on port "
459 "0 & 1.\n");
460 qla82xx_wr_32(ha,
461 QLA82XX_CRB_NIU + 0x98,
462 CRB_NIU_XG_PAUSE_CTL_P0|
463 CRB_NIU_XG_PAUSE_CTL_P1);
465 ql_log(ql_log_info, base_vha, 0x101e,
466 "Mailbox cmd timeout occurred, cmd=0x%x, "
467 "mb[0]=0x%x. Scheduling ISP abort ",
468 command, mcp->mb[0]);
469 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
470 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
471 /* Allow next mbx cmd to come in. */
472 complete(&ha->mbx_cmd_comp);
473 if (ha->isp_ops->abort_isp(vha)) {
474 /* Failed. retry later. */
475 set_bit(ISP_ABORT_NEEDED,
476 &vha->dpc_flags);
478 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
479 ql_dbg(ql_dbg_mbx, vha, 0x101f,
480 "Finished abort_isp.\n");
481 goto mbx_done;
486 premature_exit:
487 /* Allow next mbx cmd to come in. */
488 complete(&ha->mbx_cmd_comp);
490 mbx_done:
491 if (rval) {
492 if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) {
493 pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR,
494 dev_name(&ha->pdev->dev), 0x1020+0x800,
495 vha->host_no);
496 mboxes = mcp->in_mb;
497 cnt = 4;
498 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
499 if (mboxes & BIT_0) {
500 printk(" mb[%u]=%x", i, mcp->mb[i]);
501 cnt--;
503 pr_warn(" cmd=%x ****\n", command);
505 ql_dbg(ql_dbg_mbx, vha, 0x1198,
506 "host_status=%#x intr_ctrl=%#x intr_status=%#x\n",
507 RD_REG_DWORD(&reg->isp24.host_status),
508 RD_REG_DWORD(&reg->isp24.ictrl),
509 RD_REG_DWORD(&reg->isp24.istatus));
510 } else {
511 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
514 return rval;
518 qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
519 uint32_t risc_code_size)
521 int rval;
522 struct qla_hw_data *ha = vha->hw;
523 mbx_cmd_t mc;
524 mbx_cmd_t *mcp = &mc;
526 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
527 "Entered %s.\n", __func__);
529 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
530 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
531 mcp->mb[8] = MSW(risc_addr);
532 mcp->out_mb = MBX_8|MBX_0;
533 } else {
534 mcp->mb[0] = MBC_LOAD_RISC_RAM;
535 mcp->out_mb = MBX_0;
537 mcp->mb[1] = LSW(risc_addr);
538 mcp->mb[2] = MSW(req_dma);
539 mcp->mb[3] = LSW(req_dma);
540 mcp->mb[6] = MSW(MSD(req_dma));
541 mcp->mb[7] = LSW(MSD(req_dma));
542 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
543 if (IS_FWI2_CAPABLE(ha)) {
544 mcp->mb[4] = MSW(risc_code_size);
545 mcp->mb[5] = LSW(risc_code_size);
546 mcp->out_mb |= MBX_5|MBX_4;
547 } else {
548 mcp->mb[4] = LSW(risc_code_size);
549 mcp->out_mb |= MBX_4;
552 mcp->in_mb = MBX_0;
553 mcp->tov = MBX_TOV_SECONDS;
554 mcp->flags = 0;
555 rval = qla2x00_mailbox_command(vha, mcp);
557 if (rval != QLA_SUCCESS) {
558 ql_dbg(ql_dbg_mbx, vha, 0x1023,
559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
560 } else {
561 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
562 "Done %s.\n", __func__);
565 return rval;
568 #define EXTENDED_BB_CREDITS BIT_0
569 #define NVME_ENABLE_FLAG BIT_3
570 static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
572 uint16_t mb4 = BIT_0;
574 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
575 mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
577 return mb4;
580 static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
582 uint16_t mb4 = BIT_0;
584 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
585 struct nvram_81xx *nv = ha->nvram;
587 mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
590 return mb4;
594 * qla2x00_execute_fw
595 * Start adapter firmware.
597 * Input:
598 * ha = adapter block pointer.
599 * TARGET_QUEUE_LOCK must be released.
600 * ADAPTER_STATE_LOCK must be released.
602 * Returns:
603 * qla2x00 local function return status code.
605 * Context:
606 * Kernel context.
609 qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
611 int rval;
612 struct qla_hw_data *ha = vha->hw;
613 mbx_cmd_t mc;
614 mbx_cmd_t *mcp = &mc;
616 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
617 "Entered %s.\n", __func__);
619 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
620 mcp->out_mb = MBX_0;
621 mcp->in_mb = MBX_0;
622 if (IS_FWI2_CAPABLE(ha)) {
623 mcp->mb[1] = MSW(risc_addr);
624 mcp->mb[2] = LSW(risc_addr);
625 mcp->mb[3] = 0;
626 mcp->mb[4] = 0;
627 ha->flags.using_lr_setting = 0;
628 if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
629 IS_QLA27XX(ha)) {
630 if (ql2xautodetectsfp) {
631 if (ha->flags.detected_lr_sfp) {
632 mcp->mb[4] |=
633 qla25xx_set_sfp_lr_dist(ha);
634 ha->flags.using_lr_setting = 1;
636 } else {
637 struct nvram_81xx *nv = ha->nvram;
638 /* set LR distance if specified in nvram */
639 if (nv->enhanced_features &
640 NEF_LR_DIST_ENABLE) {
641 mcp->mb[4] |=
642 qla25xx_set_nvr_lr_dist(ha);
643 ha->flags.using_lr_setting = 1;
648 if (ql2xnvmeenable && IS_QLA27XX(ha))
649 mcp->mb[4] |= NVME_ENABLE_FLAG;
651 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
652 struct nvram_81xx *nv = ha->nvram;
653 /* set minimum speed if specified in nvram */
654 if (nv->min_link_speed >= 2 &&
655 nv->min_link_speed <= 5) {
656 mcp->mb[4] |= BIT_4;
657 mcp->mb[11] = nv->min_link_speed;
658 mcp->out_mb |= MBX_11;
659 mcp->in_mb |= BIT_5;
660 vha->min_link_speed_feat = nv->min_link_speed;
664 if (ha->flags.exlogins_enabled)
665 mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
667 if (ha->flags.exchoffld_enabled)
668 mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
670 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
671 mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
672 } else {
673 mcp->mb[1] = LSW(risc_addr);
674 mcp->out_mb |= MBX_1;
675 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
676 mcp->mb[2] = 0;
677 mcp->out_mb |= MBX_2;
681 mcp->tov = MBX_TOV_SECONDS;
682 mcp->flags = 0;
683 rval = qla2x00_mailbox_command(vha, mcp);
685 if (rval != QLA_SUCCESS) {
686 ql_dbg(ql_dbg_mbx, vha, 0x1026,
687 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
688 } else {
689 if (IS_FWI2_CAPABLE(ha)) {
690 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
691 ql_dbg(ql_dbg_mbx, vha, 0x119a,
692 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
693 ql_dbg(ql_dbg_mbx, vha, 0x1027,
694 "exchanges=%x.\n", mcp->mb[1]);
695 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
696 ha->max_speed_sup = mcp->mb[2] & BIT_0;
697 ql_dbg(ql_dbg_mbx, vha, 0x119b,
698 "Maximum speed supported=%s.\n",
699 ha->max_speed_sup ? "32Gps" : "16Gps");
700 if (vha->min_link_speed_feat) {
701 ha->min_link_speed = mcp->mb[5];
702 ql_dbg(ql_dbg_mbx, vha, 0x119c,
703 "Minimum speed set=%s.\n",
704 mcp->mb[5] == 5 ? "32Gps" :
705 mcp->mb[5] == 4 ? "16Gps" :
706 mcp->mb[5] == 3 ? "8Gps" :
707 mcp->mb[5] == 2 ? "4Gps" :
708 "unknown");
712 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
713 "Done.\n");
716 return rval;
720 * qla_get_exlogin_status
721 * Get extended login status
722 * uses the memory offload control/status Mailbox
724 * Input:
725 * ha: adapter state pointer.
726 * fwopt: firmware options
728 * Returns:
729 * qla2x00 local function status
731 * Context:
732 * Kernel context.
734 #define FETCH_XLOGINS_STAT 0x8
736 qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
737 uint16_t *ex_logins_cnt)
739 int rval;
740 mbx_cmd_t mc;
741 mbx_cmd_t *mcp = &mc;
743 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118f,
744 "Entered %s\n", __func__);
746 memset(mcp->mb, 0 , sizeof(mcp->mb));
747 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
748 mcp->mb[1] = FETCH_XLOGINS_STAT;
749 mcp->out_mb = MBX_1|MBX_0;
750 mcp->in_mb = MBX_10|MBX_4|MBX_0;
751 mcp->tov = MBX_TOV_SECONDS;
752 mcp->flags = 0;
754 rval = qla2x00_mailbox_command(vha, mcp);
755 if (rval != QLA_SUCCESS) {
756 ql_dbg(ql_dbg_mbx, vha, 0x1115, "Failed=%x.\n", rval);
757 } else {
758 *buf_sz = mcp->mb[4];
759 *ex_logins_cnt = mcp->mb[10];
761 ql_log(ql_log_info, vha, 0x1190,
762 "buffer size 0x%x, exchange login count=%d\n",
763 mcp->mb[4], mcp->mb[10]);
765 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1116,
766 "Done %s.\n", __func__);
769 return rval;
773 * qla_set_exlogin_mem_cfg
774 * set extended login memory configuration
775 * Mbx needs to be issues before init_cb is set
777 * Input:
778 * ha: adapter state pointer.
779 * buffer: buffer pointer
780 * phys_addr: physical address of buffer
781 * size: size of buffer
782 * TARGET_QUEUE_LOCK must be released
783 * ADAPTER_STATE_LOCK must be release
785 * Returns:
786 * qla2x00 local funxtion status code.
788 * Context:
789 * Kernel context.
791 #define CONFIG_XLOGINS_MEM 0x3
793 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
795 int rval;
796 mbx_cmd_t mc;
797 mbx_cmd_t *mcp = &mc;
798 struct qla_hw_data *ha = vha->hw;
800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
801 "Entered %s.\n", __func__);
803 memset(mcp->mb, 0 , sizeof(mcp->mb));
804 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
805 mcp->mb[1] = CONFIG_XLOGINS_MEM;
806 mcp->mb[2] = MSW(phys_addr);
807 mcp->mb[3] = LSW(phys_addr);
808 mcp->mb[6] = MSW(MSD(phys_addr));
809 mcp->mb[7] = LSW(MSD(phys_addr));
810 mcp->mb[8] = MSW(ha->exlogin_size);
811 mcp->mb[9] = LSW(ha->exlogin_size);
812 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
813 mcp->in_mb = MBX_11|MBX_0;
814 mcp->tov = MBX_TOV_SECONDS;
815 mcp->flags = 0;
816 rval = qla2x00_mailbox_command(vha, mcp);
817 if (rval != QLA_SUCCESS) {
818 /*EMPTY*/
819 ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
820 } else {
821 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
822 "Done %s.\n", __func__);
825 return rval;
829 * qla_get_exchoffld_status
830 * Get exchange offload status
831 * uses the memory offload control/status Mailbox
833 * Input:
834 * ha: adapter state pointer.
835 * fwopt: firmware options
837 * Returns:
838 * qla2x00 local function status
840 * Context:
841 * Kernel context.
843 #define FETCH_XCHOFFLD_STAT 0x2
845 qla_get_exchoffld_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
846 uint16_t *ex_logins_cnt)
848 int rval;
849 mbx_cmd_t mc;
850 mbx_cmd_t *mcp = &mc;
852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1019,
853 "Entered %s\n", __func__);
855 memset(mcp->mb, 0 , sizeof(mcp->mb));
856 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
857 mcp->mb[1] = FETCH_XCHOFFLD_STAT;
858 mcp->out_mb = MBX_1|MBX_0;
859 mcp->in_mb = MBX_10|MBX_4|MBX_0;
860 mcp->tov = MBX_TOV_SECONDS;
861 mcp->flags = 0;
863 rval = qla2x00_mailbox_command(vha, mcp);
864 if (rval != QLA_SUCCESS) {
865 ql_dbg(ql_dbg_mbx, vha, 0x1155, "Failed=%x.\n", rval);
866 } else {
867 *buf_sz = mcp->mb[4];
868 *ex_logins_cnt = mcp->mb[10];
870 ql_log(ql_log_info, vha, 0x118e,
871 "buffer size 0x%x, exchange offload count=%d\n",
872 mcp->mb[4], mcp->mb[10]);
874 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1156,
875 "Done %s.\n", __func__);
878 return rval;
882 * qla_set_exchoffld_mem_cfg
883 * Set exchange offload memory configuration
884 * Mbx needs to be issues before init_cb is set
886 * Input:
887 * ha: adapter state pointer.
888 * buffer: buffer pointer
889 * phys_addr: physical address of buffer
890 * size: size of buffer
891 * TARGET_QUEUE_LOCK must be released
892 * ADAPTER_STATE_LOCK must be release
894 * Returns:
895 * qla2x00 local funxtion status code.
897 * Context:
898 * Kernel context.
900 #define CONFIG_XCHOFFLD_MEM 0x3
902 qla_set_exchoffld_mem_cfg(scsi_qla_host_t *vha)
904 int rval;
905 mbx_cmd_t mc;
906 mbx_cmd_t *mcp = &mc;
907 struct qla_hw_data *ha = vha->hw;
909 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1157,
910 "Entered %s.\n", __func__);
912 memset(mcp->mb, 0 , sizeof(mcp->mb));
913 mcp->mb[0] = MBC_GET_MEM_OFFLOAD_CNTRL_STAT;
914 mcp->mb[1] = CONFIG_XCHOFFLD_MEM;
915 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
916 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
917 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
918 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
919 mcp->mb[8] = MSW(ha->exchoffld_size);
920 mcp->mb[9] = LSW(ha->exchoffld_size);
921 mcp->out_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
922 mcp->in_mb = MBX_11|MBX_0;
923 mcp->tov = MBX_TOV_SECONDS;
924 mcp->flags = 0;
925 rval = qla2x00_mailbox_command(vha, mcp);
926 if (rval != QLA_SUCCESS) {
927 /*EMPTY*/
928 ql_dbg(ql_dbg_mbx, vha, 0x1158, "Failed=%x.\n", rval);
929 } else {
930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1192,
931 "Done %s.\n", __func__);
934 return rval;
938 * qla2x00_get_fw_version
939 * Get firmware version.
941 * Input:
942 * ha: adapter state pointer.
943 * major: pointer for major number.
944 * minor: pointer for minor number.
945 * subminor: pointer for subminor number.
947 * Returns:
948 * qla2x00 local function return status code.
950 * Context:
951 * Kernel context.
954 qla2x00_get_fw_version(scsi_qla_host_t *vha)
956 int rval;
957 mbx_cmd_t mc;
958 mbx_cmd_t *mcp = &mc;
959 struct qla_hw_data *ha = vha->hw;
961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
962 "Entered %s.\n", __func__);
964 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
965 mcp->out_mb = MBX_0;
966 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
967 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
968 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
969 if (IS_FWI2_CAPABLE(ha))
970 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
971 if (IS_QLA27XX(ha))
972 mcp->in_mb |=
973 MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
974 MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
976 mcp->flags = 0;
977 mcp->tov = MBX_TOV_SECONDS;
978 rval = qla2x00_mailbox_command(vha, mcp);
979 if (rval != QLA_SUCCESS)
980 goto failed;
982 /* Return mailbox data. */
983 ha->fw_major_version = mcp->mb[1];
984 ha->fw_minor_version = mcp->mb[2];
985 ha->fw_subminor_version = mcp->mb[3];
986 ha->fw_attributes = mcp->mb[6];
987 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
988 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
989 else
990 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
992 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
993 ha->mpi_version[0] = mcp->mb[10] & 0xff;
994 ha->mpi_version[1] = mcp->mb[11] >> 8;
995 ha->mpi_version[2] = mcp->mb[11] & 0xff;
996 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
997 ha->phy_version[0] = mcp->mb[8] & 0xff;
998 ha->phy_version[1] = mcp->mb[9] >> 8;
999 ha->phy_version[2] = mcp->mb[9] & 0xff;
1002 if (IS_FWI2_CAPABLE(ha)) {
1003 ha->fw_attributes_h = mcp->mb[15];
1004 ha->fw_attributes_ext[0] = mcp->mb[16];
1005 ha->fw_attributes_ext[1] = mcp->mb[17];
1006 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
1007 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
1008 __func__, mcp->mb[15], mcp->mb[6]);
1009 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
1010 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
1011 __func__, mcp->mb[17], mcp->mb[16]);
1013 if (ha->fw_attributes_h & 0x4)
1014 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118d,
1015 "%s: Firmware supports Extended Login 0x%x\n",
1016 __func__, ha->fw_attributes_h);
1018 if (ha->fw_attributes_h & 0x8)
1019 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
1020 "%s: Firmware supports Exchange Offload 0x%x\n",
1021 __func__, ha->fw_attributes_h);
1024 * FW supports nvme and driver load parameter requested nvme.
1025 * BIT 26 of fw_attributes indicates NVMe support.
1027 if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
1028 vha->flags.nvme_enabled = 1;
1032 if (IS_QLA27XX(ha)) {
1033 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1034 ha->mpi_version[1] = mcp->mb[11] >> 8;
1035 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1036 ha->pep_version[0] = mcp->mb[13] & 0xff;
1037 ha->pep_version[1] = mcp->mb[14] >> 8;
1038 ha->pep_version[2] = mcp->mb[14] & 0xff;
1039 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1040 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1041 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1042 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1045 failed:
1046 if (rval != QLA_SUCCESS) {
1047 /*EMPTY*/
1048 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
1049 } else {
1050 /*EMPTY*/
1051 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
1052 "Done %s.\n", __func__);
1054 return rval;
1058 * qla2x00_get_fw_options
1059 * Set firmware options.
1061 * Input:
1062 * ha = adapter block pointer.
1063 * fwopt = pointer for firmware options.
1065 * Returns:
1066 * qla2x00 local function return status code.
1068 * Context:
1069 * Kernel context.
1072 qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1074 int rval;
1075 mbx_cmd_t mc;
1076 mbx_cmd_t *mcp = &mc;
1078 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
1079 "Entered %s.\n", __func__);
1081 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
1082 mcp->out_mb = MBX_0;
1083 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1084 mcp->tov = MBX_TOV_SECONDS;
1085 mcp->flags = 0;
1086 rval = qla2x00_mailbox_command(vha, mcp);
1088 if (rval != QLA_SUCCESS) {
1089 /*EMPTY*/
1090 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
1091 } else {
1092 fwopts[0] = mcp->mb[0];
1093 fwopts[1] = mcp->mb[1];
1094 fwopts[2] = mcp->mb[2];
1095 fwopts[3] = mcp->mb[3];
1097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
1098 "Done %s.\n", __func__);
1101 return rval;
1106 * qla2x00_set_fw_options
1107 * Set firmware options.
1109 * Input:
1110 * ha = adapter block pointer.
1111 * fwopt = pointer for firmware options.
1113 * Returns:
1114 * qla2x00 local function return status code.
1116 * Context:
1117 * Kernel context.
1120 qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
1122 int rval;
1123 mbx_cmd_t mc;
1124 mbx_cmd_t *mcp = &mc;
1126 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
1127 "Entered %s.\n", __func__);
1129 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
1130 mcp->mb[1] = fwopts[1];
1131 mcp->mb[2] = fwopts[2];
1132 mcp->mb[3] = fwopts[3];
1133 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1134 mcp->in_mb = MBX_0;
1135 if (IS_FWI2_CAPABLE(vha->hw)) {
1136 mcp->in_mb |= MBX_1;
1137 mcp->mb[10] = fwopts[10];
1138 mcp->out_mb |= MBX_10;
1139 } else {
1140 mcp->mb[10] = fwopts[10];
1141 mcp->mb[11] = fwopts[11];
1142 mcp->mb[12] = 0; /* Undocumented, but used */
1143 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
1145 mcp->tov = MBX_TOV_SECONDS;
1146 mcp->flags = 0;
1147 rval = qla2x00_mailbox_command(vha, mcp);
1149 fwopts[0] = mcp->mb[0];
1151 if (rval != QLA_SUCCESS) {
1152 /*EMPTY*/
1153 ql_dbg(ql_dbg_mbx, vha, 0x1030,
1154 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
1155 } else {
1156 /*EMPTY*/
1157 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
1158 "Done %s.\n", __func__);
1161 return rval;
1165 * qla2x00_mbx_reg_test
1166 * Mailbox register wrap test.
1168 * Input:
1169 * ha = adapter block pointer.
1170 * TARGET_QUEUE_LOCK must be released.
1171 * ADAPTER_STATE_LOCK must be released.
1173 * Returns:
1174 * qla2x00 local function return status code.
1176 * Context:
1177 * Kernel context.
1180 qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
1182 int rval;
1183 mbx_cmd_t mc;
1184 mbx_cmd_t *mcp = &mc;
1186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
1187 "Entered %s.\n", __func__);
1189 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
1190 mcp->mb[1] = 0xAAAA;
1191 mcp->mb[2] = 0x5555;
1192 mcp->mb[3] = 0xAA55;
1193 mcp->mb[4] = 0x55AA;
1194 mcp->mb[5] = 0xA5A5;
1195 mcp->mb[6] = 0x5A5A;
1196 mcp->mb[7] = 0x2525;
1197 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1198 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1199 mcp->tov = MBX_TOV_SECONDS;
1200 mcp->flags = 0;
1201 rval = qla2x00_mailbox_command(vha, mcp);
1203 if (rval == QLA_SUCCESS) {
1204 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
1205 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
1206 rval = QLA_FUNCTION_FAILED;
1207 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
1208 mcp->mb[7] != 0x2525)
1209 rval = QLA_FUNCTION_FAILED;
1212 if (rval != QLA_SUCCESS) {
1213 /*EMPTY*/
1214 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
1215 } else {
1216 /*EMPTY*/
1217 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
1218 "Done %s.\n", __func__);
1221 return rval;
1225 * qla2x00_verify_checksum
1226 * Verify firmware checksum.
1228 * Input:
1229 * ha = adapter block pointer.
1230 * TARGET_QUEUE_LOCK must be released.
1231 * ADAPTER_STATE_LOCK must be released.
1233 * Returns:
1234 * qla2x00 local function return status code.
1236 * Context:
1237 * Kernel context.
1240 qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
1242 int rval;
1243 mbx_cmd_t mc;
1244 mbx_cmd_t *mcp = &mc;
1246 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
1247 "Entered %s.\n", __func__);
1249 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
1250 mcp->out_mb = MBX_0;
1251 mcp->in_mb = MBX_0;
1252 if (IS_FWI2_CAPABLE(vha->hw)) {
1253 mcp->mb[1] = MSW(risc_addr);
1254 mcp->mb[2] = LSW(risc_addr);
1255 mcp->out_mb |= MBX_2|MBX_1;
1256 mcp->in_mb |= MBX_2|MBX_1;
1257 } else {
1258 mcp->mb[1] = LSW(risc_addr);
1259 mcp->out_mb |= MBX_1;
1260 mcp->in_mb |= MBX_1;
1263 mcp->tov = MBX_TOV_SECONDS;
1264 mcp->flags = 0;
1265 rval = qla2x00_mailbox_command(vha, mcp);
1267 if (rval != QLA_SUCCESS) {
1268 ql_dbg(ql_dbg_mbx, vha, 0x1036,
1269 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
1270 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
1271 } else {
1272 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
1273 "Done %s.\n", __func__);
1276 return rval;
1280 * qla2x00_issue_iocb
1281 * Issue IOCB using mailbox command
1283 * Input:
1284 * ha = adapter state pointer.
1285 * buffer = buffer pointer.
1286 * phys_addr = physical address of buffer.
1287 * size = size of buffer.
1288 * TARGET_QUEUE_LOCK must be released.
1289 * ADAPTER_STATE_LOCK must be released.
1291 * Returns:
1292 * qla2x00 local function return status code.
1294 * Context:
1295 * Kernel context.
1298 qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
1299 dma_addr_t phys_addr, size_t size, uint32_t tov)
1301 int rval;
1302 mbx_cmd_t mc;
1303 mbx_cmd_t *mcp = &mc;
1305 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
1306 "Entered %s.\n", __func__);
1308 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
1309 mcp->mb[1] = 0;
1310 mcp->mb[2] = MSW(phys_addr);
1311 mcp->mb[3] = LSW(phys_addr);
1312 mcp->mb[6] = MSW(MSD(phys_addr));
1313 mcp->mb[7] = LSW(MSD(phys_addr));
1314 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1315 mcp->in_mb = MBX_2|MBX_0;
1316 mcp->tov = tov;
1317 mcp->flags = 0;
1318 rval = qla2x00_mailbox_command(vha, mcp);
1320 if (rval != QLA_SUCCESS) {
1321 /*EMPTY*/
1322 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
1323 } else {
1324 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
1326 /* Mask reserved bits. */
1327 sts_entry->entry_status &=
1328 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
1329 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
1330 "Done %s.\n", __func__);
1333 return rval;
1337 qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
1338 size_t size)
1340 return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
1341 MBX_TOV_SECONDS);
1345 * qla2x00_abort_command
1346 * Abort command aborts a specified IOCB.
1348 * Input:
1349 * ha = adapter block pointer.
1350 * sp = SB structure pointer.
1352 * Returns:
1353 * qla2x00 local function return status code.
1355 * Context:
1356 * Kernel context.
1359 qla2x00_abort_command(srb_t *sp)
1361 unsigned long flags = 0;
1362 int rval;
1363 uint32_t handle = 0;
1364 mbx_cmd_t mc;
1365 mbx_cmd_t *mcp = &mc;
1366 fc_port_t *fcport = sp->fcport;
1367 scsi_qla_host_t *vha = fcport->vha;
1368 struct qla_hw_data *ha = vha->hw;
1369 struct req_que *req;
1370 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1372 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
1373 "Entered %s.\n", __func__);
1375 if (vha->flags.qpairs_available && sp->qpair)
1376 req = sp->qpair->req;
1377 else
1378 req = vha->req;
1380 spin_lock_irqsave(&ha->hardware_lock, flags);
1381 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
1382 if (req->outstanding_cmds[handle] == sp)
1383 break;
1385 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1387 if (handle == req->num_outstanding_cmds) {
1388 /* command not found */
1389 return QLA_FUNCTION_FAILED;
1392 mcp->mb[0] = MBC_ABORT_COMMAND;
1393 if (HAS_EXTENDED_IDS(ha))
1394 mcp->mb[1] = fcport->loop_id;
1395 else
1396 mcp->mb[1] = fcport->loop_id << 8;
1397 mcp->mb[2] = (uint16_t)handle;
1398 mcp->mb[3] = (uint16_t)(handle >> 16);
1399 mcp->mb[6] = (uint16_t)cmd->device->lun;
1400 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1401 mcp->in_mb = MBX_0;
1402 mcp->tov = MBX_TOV_SECONDS;
1403 mcp->flags = 0;
1404 rval = qla2x00_mailbox_command(vha, mcp);
1406 if (rval != QLA_SUCCESS) {
1407 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
1408 } else {
1409 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
1410 "Done %s.\n", __func__);
1413 return rval;
1417 qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
1419 int rval, rval2;
1420 mbx_cmd_t mc;
1421 mbx_cmd_t *mcp = &mc;
1422 scsi_qla_host_t *vha;
1423 struct req_que *req;
1424 struct rsp_que *rsp;
1426 l = l;
1427 vha = fcport->vha;
1429 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
1430 "Entered %s.\n", __func__);
1432 req = vha->hw->req_q_map[0];
1433 rsp = req->rsp;
1434 mcp->mb[0] = MBC_ABORT_TARGET;
1435 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
1436 if (HAS_EXTENDED_IDS(vha->hw)) {
1437 mcp->mb[1] = fcport->loop_id;
1438 mcp->mb[10] = 0;
1439 mcp->out_mb |= MBX_10;
1440 } else {
1441 mcp->mb[1] = fcport->loop_id << 8;
1443 mcp->mb[2] = vha->hw->loop_reset_delay;
1444 mcp->mb[9] = vha->vp_idx;
1446 mcp->in_mb = MBX_0;
1447 mcp->tov = MBX_TOV_SECONDS;
1448 mcp->flags = 0;
1449 rval = qla2x00_mailbox_command(vha, mcp);
1450 if (rval != QLA_SUCCESS) {
1451 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
1452 "Failed=%x.\n", rval);
1455 /* Issue marker IOCB. */
1456 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
1457 MK_SYNC_ID);
1458 if (rval2 != QLA_SUCCESS) {
1459 ql_dbg(ql_dbg_mbx, vha, 0x1040,
1460 "Failed to issue marker IOCB (%x).\n", rval2);
1461 } else {
1462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
1463 "Done %s.\n", __func__);
1466 return rval;
1470 qla2x00_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
1472 int rval, rval2;
1473 mbx_cmd_t mc;
1474 mbx_cmd_t *mcp = &mc;
1475 scsi_qla_host_t *vha;
1476 struct req_que *req;
1477 struct rsp_que *rsp;
1479 vha = fcport->vha;
1481 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1482 "Entered %s.\n", __func__);
1484 req = vha->hw->req_q_map[0];
1485 rsp = req->rsp;
1486 mcp->mb[0] = MBC_LUN_RESET;
1487 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
1488 if (HAS_EXTENDED_IDS(vha->hw))
1489 mcp->mb[1] = fcport->loop_id;
1490 else
1491 mcp->mb[1] = fcport->loop_id << 8;
1492 mcp->mb[2] = (u32)l;
1493 mcp->mb[3] = 0;
1494 mcp->mb[9] = vha->vp_idx;
1496 mcp->in_mb = MBX_0;
1497 mcp->tov = MBX_TOV_SECONDS;
1498 mcp->flags = 0;
1499 rval = qla2x00_mailbox_command(vha, mcp);
1500 if (rval != QLA_SUCCESS) {
1501 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
1504 /* Issue marker IOCB. */
1505 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
1506 MK_SYNC_ID_LUN);
1507 if (rval2 != QLA_SUCCESS) {
1508 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1509 "Failed to issue marker IOCB (%x).\n", rval2);
1510 } else {
1511 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1512 "Done %s.\n", __func__);
1515 return rval;
1519 * qla2x00_get_adapter_id
1520 * Get adapter ID and topology.
1522 * Input:
1523 * ha = adapter block pointer.
1524 * id = pointer for loop ID.
1525 * al_pa = pointer for AL_PA.
1526 * area = pointer for area.
1527 * domain = pointer for domain.
1528 * top = pointer for topology.
1529 * TARGET_QUEUE_LOCK must be released.
1530 * ADAPTER_STATE_LOCK must be released.
1532 * Returns:
1533 * qla2x00 local function return status code.
1535 * Context:
1536 * Kernel context.
1539 qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1540 uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
1542 int rval;
1543 mbx_cmd_t mc;
1544 mbx_cmd_t *mcp = &mc;
1546 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1547 "Entered %s.\n", __func__);
1549 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1550 mcp->mb[9] = vha->vp_idx;
1551 mcp->out_mb = MBX_9|MBX_0;
1552 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1553 if (IS_CNA_CAPABLE(vha->hw))
1554 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1555 if (IS_FWI2_CAPABLE(vha->hw))
1556 mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
1557 if (IS_QLA27XX(vha->hw))
1558 mcp->in_mb |= MBX_15;
1559 mcp->tov = MBX_TOV_SECONDS;
1560 mcp->flags = 0;
1561 rval = qla2x00_mailbox_command(vha, mcp);
1562 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1563 rval = QLA_COMMAND_ERROR;
1564 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1565 rval = QLA_INVALID_COMMAND;
1567 /* Return data. */
1568 *id = mcp->mb[1];
1569 *al_pa = LSB(mcp->mb[2]);
1570 *area = MSB(mcp->mb[2]);
1571 *domain = LSB(mcp->mb[3]);
1572 *top = mcp->mb[6];
1573 *sw_cap = mcp->mb[7];
1575 if (rval != QLA_SUCCESS) {
1576 /*EMPTY*/
1577 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1578 } else {
1579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1580 "Done %s.\n", __func__);
1582 if (IS_CNA_CAPABLE(vha->hw)) {
1583 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1584 vha->fcoe_fcf_idx = mcp->mb[10];
1585 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1586 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1587 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1588 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1589 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1590 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
1592 /* If FA-WWN supported */
1593 if (IS_FAWWN_CAPABLE(vha->hw)) {
1594 if (mcp->mb[7] & BIT_14) {
1595 vha->port_name[0] = MSB(mcp->mb[16]);
1596 vha->port_name[1] = LSB(mcp->mb[16]);
1597 vha->port_name[2] = MSB(mcp->mb[17]);
1598 vha->port_name[3] = LSB(mcp->mb[17]);
1599 vha->port_name[4] = MSB(mcp->mb[18]);
1600 vha->port_name[5] = LSB(mcp->mb[18]);
1601 vha->port_name[6] = MSB(mcp->mb[19]);
1602 vha->port_name[7] = LSB(mcp->mb[19]);
1603 fc_host_port_name(vha->host) =
1604 wwn_to_u64(vha->port_name);
1605 ql_dbg(ql_dbg_mbx, vha, 0x10ca,
1606 "FA-WWN acquired %016llx\n",
1607 wwn_to_u64(vha->port_name));
1611 if (IS_QLA27XX(vha->hw))
1612 vha->bbcr = mcp->mb[15];
1615 return rval;
1619 * qla2x00_get_retry_cnt
1620 * Get current firmware login retry count and delay.
1622 * Input:
1623 * ha = adapter block pointer.
1624 * retry_cnt = pointer to login retry count.
1625 * tov = pointer to login timeout value.
1627 * Returns:
1628 * qla2x00 local function return status code.
1630 * Context:
1631 * Kernel context.
1634 qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1635 uint16_t *r_a_tov)
1637 int rval;
1638 uint16_t ratov;
1639 mbx_cmd_t mc;
1640 mbx_cmd_t *mcp = &mc;
1642 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1643 "Entered %s.\n", __func__);
1645 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1646 mcp->out_mb = MBX_0;
1647 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1648 mcp->tov = MBX_TOV_SECONDS;
1649 mcp->flags = 0;
1650 rval = qla2x00_mailbox_command(vha, mcp);
1652 if (rval != QLA_SUCCESS) {
1653 /*EMPTY*/
1654 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1656 } else {
1657 /* Convert returned data and check our values. */
1658 *r_a_tov = mcp->mb[3] / 2;
1659 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1660 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1661 /* Update to the larger values */
1662 *retry_cnt = (uint8_t)mcp->mb[1];
1663 *tov = ratov;
1666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1667 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1670 return rval;
1674 * qla2x00_init_firmware
1675 * Initialize adapter firmware.
1677 * Input:
1678 * ha = adapter block pointer.
1679 * dptr = Initialization control block pointer.
1680 * size = size of initialization control block.
1681 * TARGET_QUEUE_LOCK must be released.
1682 * ADAPTER_STATE_LOCK must be released.
1684 * Returns:
1685 * qla2x00 local function return status code.
1687 * Context:
1688 * Kernel context.
1691 qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1693 int rval;
1694 mbx_cmd_t mc;
1695 mbx_cmd_t *mcp = &mc;
1696 struct qla_hw_data *ha = vha->hw;
1698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1699 "Entered %s.\n", __func__);
1701 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1702 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1703 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1705 if (ha->flags.npiv_supported)
1706 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1707 else
1708 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1710 mcp->mb[1] = 0;
1711 mcp->mb[2] = MSW(ha->init_cb_dma);
1712 mcp->mb[3] = LSW(ha->init_cb_dma);
1713 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1714 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1715 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1716 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1717 mcp->mb[1] = BIT_0;
1718 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1719 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1720 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1721 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1722 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1723 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1725 /* 1 and 2 should normally be captured. */
1726 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1727 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1728 /* mb3 is additional info about the installed SFP. */
1729 mcp->in_mb |= MBX_3;
1730 mcp->buf_size = size;
1731 mcp->flags = MBX_DMA_OUT;
1732 mcp->tov = MBX_TOV_SECONDS;
1733 rval = qla2x00_mailbox_command(vha, mcp);
1735 if (rval != QLA_SUCCESS) {
1736 /*EMPTY*/
1737 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1738 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1739 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1740 } else {
1741 if (IS_QLA27XX(ha)) {
1742 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1743 ql_dbg(ql_dbg_mbx, vha, 0x119d,
1744 "Invalid SFP/Validation Failed\n");
1746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1747 "Done %s.\n", __func__);
1750 return rval;
1755 * qla2x00_get_port_database
1756 * Issue normal/enhanced get port database mailbox command
1757 * and copy device name as necessary.
1759 * Input:
1760 * ha = adapter state pointer.
1761 * dev = structure pointer.
1762 * opt = enhanced cmd option byte.
1764 * Returns:
1765 * qla2x00 local function return status code.
1767 * Context:
1768 * Kernel context.
1771 qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1773 int rval;
1774 mbx_cmd_t mc;
1775 mbx_cmd_t *mcp = &mc;
1776 port_database_t *pd;
1777 struct port_database_24xx *pd24;
1778 dma_addr_t pd_dma;
1779 struct qla_hw_data *ha = vha->hw;
1781 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1782 "Entered %s.\n", __func__);
1784 pd24 = NULL;
1785 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1786 if (pd == NULL) {
1787 ql_log(ql_log_warn, vha, 0x1050,
1788 "Failed to allocate port database structure.\n");
1789 fcport->query = 0;
1790 return QLA_MEMORY_ALLOC_FAILED;
1793 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1794 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1795 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1796 mcp->mb[2] = MSW(pd_dma);
1797 mcp->mb[3] = LSW(pd_dma);
1798 mcp->mb[6] = MSW(MSD(pd_dma));
1799 mcp->mb[7] = LSW(MSD(pd_dma));
1800 mcp->mb[9] = vha->vp_idx;
1801 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1802 mcp->in_mb = MBX_0;
1803 if (IS_FWI2_CAPABLE(ha)) {
1804 mcp->mb[1] = fcport->loop_id;
1805 mcp->mb[10] = opt;
1806 mcp->out_mb |= MBX_10|MBX_1;
1807 mcp->in_mb |= MBX_1;
1808 } else if (HAS_EXTENDED_IDS(ha)) {
1809 mcp->mb[1] = fcport->loop_id;
1810 mcp->mb[10] = opt;
1811 mcp->out_mb |= MBX_10|MBX_1;
1812 } else {
1813 mcp->mb[1] = fcport->loop_id << 8 | opt;
1814 mcp->out_mb |= MBX_1;
1816 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1817 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1818 mcp->flags = MBX_DMA_IN;
1819 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1820 rval = qla2x00_mailbox_command(vha, mcp);
1821 if (rval != QLA_SUCCESS)
1822 goto gpd_error_out;
1824 if (IS_FWI2_CAPABLE(ha)) {
1825 uint64_t zero = 0;
1826 u8 current_login_state, last_login_state;
1828 pd24 = (struct port_database_24xx *) pd;
1830 /* Check for logged in state. */
1831 if (fcport->fc4f_nvme) {
1832 current_login_state = pd24->current_login_state >> 4;
1833 last_login_state = pd24->last_login_state >> 4;
1834 } else {
1835 current_login_state = pd24->current_login_state & 0xf;
1836 last_login_state = pd24->last_login_state & 0xf;
1838 fcport->current_login_state = pd24->current_login_state;
1839 fcport->last_login_state = pd24->last_login_state;
1841 /* Check for logged in state. */
1842 if (current_login_state != PDS_PRLI_COMPLETE &&
1843 last_login_state != PDS_PRLI_COMPLETE) {
1844 ql_dbg(ql_dbg_mbx, vha, 0x119a,
1845 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
1846 current_login_state, last_login_state,
1847 fcport->loop_id);
1848 rval = QLA_FUNCTION_FAILED;
1850 if (!fcport->query)
1851 goto gpd_error_out;
1854 if (fcport->loop_id == FC_NO_LOOP_ID ||
1855 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1856 memcmp(fcport->port_name, pd24->port_name, 8))) {
1857 /* We lost the device mid way. */
1858 rval = QLA_NOT_LOGGED_IN;
1859 goto gpd_error_out;
1862 /* Names are little-endian. */
1863 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1864 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1866 /* Get port_id of device. */
1867 fcport->d_id.b.domain = pd24->port_id[0];
1868 fcport->d_id.b.area = pd24->port_id[1];
1869 fcport->d_id.b.al_pa = pd24->port_id[2];
1870 fcport->d_id.b.rsvd_1 = 0;
1872 /* If not target must be initiator or unknown type. */
1873 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1874 fcport->port_type = FCT_INITIATOR;
1875 else
1876 fcport->port_type = FCT_TARGET;
1878 /* Passback COS information. */
1879 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1880 FC_COS_CLASS2 : FC_COS_CLASS3;
1882 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1883 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1884 } else {
1885 uint64_t zero = 0;
1887 /* Check for logged in state. */
1888 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1889 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1890 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1891 "Unable to verify login-state (%x/%x) - "
1892 "portid=%02x%02x%02x.\n", pd->master_state,
1893 pd->slave_state, fcport->d_id.b.domain,
1894 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1895 rval = QLA_FUNCTION_FAILED;
1896 goto gpd_error_out;
1899 if (fcport->loop_id == FC_NO_LOOP_ID ||
1900 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1901 memcmp(fcport->port_name, pd->port_name, 8))) {
1902 /* We lost the device mid way. */
1903 rval = QLA_NOT_LOGGED_IN;
1904 goto gpd_error_out;
1907 /* Names are little-endian. */
1908 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1909 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1911 /* Get port_id of device. */
1912 fcport->d_id.b.domain = pd->port_id[0];
1913 fcport->d_id.b.area = pd->port_id[3];
1914 fcport->d_id.b.al_pa = pd->port_id[2];
1915 fcport->d_id.b.rsvd_1 = 0;
1917 /* If not target must be initiator or unknown type. */
1918 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1919 fcport->port_type = FCT_INITIATOR;
1920 else
1921 fcport->port_type = FCT_TARGET;
1923 /* Passback COS information. */
1924 fcport->supported_classes = (pd->options & BIT_4) ?
1925 FC_COS_CLASS2: FC_COS_CLASS3;
1928 gpd_error_out:
1929 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1930 fcport->query = 0;
1932 if (rval != QLA_SUCCESS) {
1933 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1934 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1935 mcp->mb[0], mcp->mb[1]);
1936 } else {
1937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1938 "Done %s.\n", __func__);
1941 return rval;
1945 * qla2x00_get_firmware_state
1946 * Get adapter firmware state.
1948 * Input:
1949 * ha = adapter block pointer.
1950 * dptr = pointer for firmware state.
1951 * TARGET_QUEUE_LOCK must be released.
1952 * ADAPTER_STATE_LOCK must be released.
1954 * Returns:
1955 * qla2x00 local function return status code.
1957 * Context:
1958 * Kernel context.
1961 qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1963 int rval;
1964 mbx_cmd_t mc;
1965 mbx_cmd_t *mcp = &mc;
1966 struct qla_hw_data *ha = vha->hw;
1968 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1969 "Entered %s.\n", __func__);
1971 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1972 mcp->out_mb = MBX_0;
1973 if (IS_FWI2_CAPABLE(vha->hw))
1974 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1975 else
1976 mcp->in_mb = MBX_1|MBX_0;
1977 mcp->tov = MBX_TOV_SECONDS;
1978 mcp->flags = 0;
1979 rval = qla2x00_mailbox_command(vha, mcp);
1981 /* Return firmware states. */
1982 states[0] = mcp->mb[1];
1983 if (IS_FWI2_CAPABLE(vha->hw)) {
1984 states[1] = mcp->mb[2];
1985 states[2] = mcp->mb[3]; /* SFP info */
1986 states[3] = mcp->mb[4];
1987 states[4] = mcp->mb[5];
1988 states[5] = mcp->mb[6]; /* DPORT status */
1991 if (rval != QLA_SUCCESS) {
1992 /*EMPTY*/
1993 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1994 } else {
1995 if (IS_QLA27XX(ha)) {
1996 if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
1997 ql_dbg(ql_dbg_mbx, vha, 0x119e,
1998 "Invalid SFP/Validation Failed\n");
2000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
2001 "Done %s.\n", __func__);
2004 return rval;
2008 * qla2x00_get_port_name
2009 * Issue get port name mailbox command.
2010 * Returned name is in big endian format.
2012 * Input:
2013 * ha = adapter block pointer.
2014 * loop_id = loop ID of device.
2015 * name = pointer for name.
2016 * TARGET_QUEUE_LOCK must be released.
2017 * ADAPTER_STATE_LOCK must be released.
2019 * Returns:
2020 * qla2x00 local function return status code.
2022 * Context:
2023 * Kernel context.
2026 qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
2027 uint8_t opt)
2029 int rval;
2030 mbx_cmd_t mc;
2031 mbx_cmd_t *mcp = &mc;
2033 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
2034 "Entered %s.\n", __func__);
2036 mcp->mb[0] = MBC_GET_PORT_NAME;
2037 mcp->mb[9] = vha->vp_idx;
2038 mcp->out_mb = MBX_9|MBX_1|MBX_0;
2039 if (HAS_EXTENDED_IDS(vha->hw)) {
2040 mcp->mb[1] = loop_id;
2041 mcp->mb[10] = opt;
2042 mcp->out_mb |= MBX_10;
2043 } else {
2044 mcp->mb[1] = loop_id << 8 | opt;
2047 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2048 mcp->tov = MBX_TOV_SECONDS;
2049 mcp->flags = 0;
2050 rval = qla2x00_mailbox_command(vha, mcp);
2052 if (rval != QLA_SUCCESS) {
2053 /*EMPTY*/
2054 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
2055 } else {
2056 if (name != NULL) {
2057 /* This function returns name in big endian. */
2058 name[0] = MSB(mcp->mb[2]);
2059 name[1] = LSB(mcp->mb[2]);
2060 name[2] = MSB(mcp->mb[3]);
2061 name[3] = LSB(mcp->mb[3]);
2062 name[4] = MSB(mcp->mb[6]);
2063 name[5] = LSB(mcp->mb[6]);
2064 name[6] = MSB(mcp->mb[7]);
2065 name[7] = LSB(mcp->mb[7]);
2068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
2069 "Done %s.\n", __func__);
2072 return rval;
2076 * qla24xx_link_initialization
2077 * Issue link initialization mailbox command.
2079 * Input:
2080 * ha = adapter block pointer.
2081 * TARGET_QUEUE_LOCK must be released.
2082 * ADAPTER_STATE_LOCK must be released.
2084 * Returns:
2085 * qla2x00 local function return status code.
2087 * Context:
2088 * Kernel context.
2091 qla24xx_link_initialize(scsi_qla_host_t *vha)
2093 int rval;
2094 mbx_cmd_t mc;
2095 mbx_cmd_t *mcp = &mc;
2097 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
2098 "Entered %s.\n", __func__);
2100 if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
2101 return QLA_FUNCTION_FAILED;
2103 mcp->mb[0] = MBC_LINK_INITIALIZATION;
2104 mcp->mb[1] = BIT_4;
2105 if (vha->hw->operating_mode == LOOP)
2106 mcp->mb[1] |= BIT_6;
2107 else
2108 mcp->mb[1] |= BIT_5;
2109 mcp->mb[2] = 0;
2110 mcp->mb[3] = 0;
2111 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2112 mcp->in_mb = MBX_0;
2113 mcp->tov = MBX_TOV_SECONDS;
2114 mcp->flags = 0;
2115 rval = qla2x00_mailbox_command(vha, mcp);
2117 if (rval != QLA_SUCCESS) {
2118 ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
2119 } else {
2120 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
2121 "Done %s.\n", __func__);
2124 return rval;
2128 * qla2x00_lip_reset
2129 * Issue LIP reset mailbox command.
2131 * Input:
2132 * ha = adapter block pointer.
2133 * TARGET_QUEUE_LOCK must be released.
2134 * ADAPTER_STATE_LOCK must be released.
2136 * Returns:
2137 * qla2x00 local function return status code.
2139 * Context:
2140 * Kernel context.
2143 qla2x00_lip_reset(scsi_qla_host_t *vha)
2145 int rval;
2146 mbx_cmd_t mc;
2147 mbx_cmd_t *mcp = &mc;
2149 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
2150 "Entered %s.\n", __func__);
2152 if (IS_CNA_CAPABLE(vha->hw)) {
2153 /* Logout across all FCFs. */
2154 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2155 mcp->mb[1] = BIT_1;
2156 mcp->mb[2] = 0;
2157 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2158 } else if (IS_FWI2_CAPABLE(vha->hw)) {
2159 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2160 mcp->mb[1] = BIT_6;
2161 mcp->mb[2] = 0;
2162 mcp->mb[3] = vha->hw->loop_reset_delay;
2163 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2164 } else {
2165 mcp->mb[0] = MBC_LIP_RESET;
2166 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2167 if (HAS_EXTENDED_IDS(vha->hw)) {
2168 mcp->mb[1] = 0x00ff;
2169 mcp->mb[10] = 0;
2170 mcp->out_mb |= MBX_10;
2171 } else {
2172 mcp->mb[1] = 0xff00;
2174 mcp->mb[2] = vha->hw->loop_reset_delay;
2175 mcp->mb[3] = 0;
2177 mcp->in_mb = MBX_0;
2178 mcp->tov = MBX_TOV_SECONDS;
2179 mcp->flags = 0;
2180 rval = qla2x00_mailbox_command(vha, mcp);
2182 if (rval != QLA_SUCCESS) {
2183 /*EMPTY*/
2184 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
2185 } else {
2186 /*EMPTY*/
2187 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
2188 "Done %s.\n", __func__);
2191 return rval;
2195 * qla2x00_send_sns
2196 * Send SNS command.
2198 * Input:
2199 * ha = adapter block pointer.
2200 * sns = pointer for command.
2201 * cmd_size = command size.
2202 * buf_size = response/command size.
2203 * TARGET_QUEUE_LOCK must be released.
2204 * ADAPTER_STATE_LOCK must be released.
2206 * Returns:
2207 * qla2x00 local function return status code.
2209 * Context:
2210 * Kernel context.
2213 qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
2214 uint16_t cmd_size, size_t buf_size)
2216 int rval;
2217 mbx_cmd_t mc;
2218 mbx_cmd_t *mcp = &mc;
2220 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
2221 "Entered %s.\n", __func__);
2223 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
2224 "Retry cnt=%d ratov=%d total tov=%d.\n",
2225 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
2227 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
2228 mcp->mb[1] = cmd_size;
2229 mcp->mb[2] = MSW(sns_phys_address);
2230 mcp->mb[3] = LSW(sns_phys_address);
2231 mcp->mb[6] = MSW(MSD(sns_phys_address));
2232 mcp->mb[7] = LSW(MSD(sns_phys_address));
2233 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2234 mcp->in_mb = MBX_0|MBX_1;
2235 mcp->buf_size = buf_size;
2236 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
2237 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
2238 rval = qla2x00_mailbox_command(vha, mcp);
2240 if (rval != QLA_SUCCESS) {
2241 /*EMPTY*/
2242 ql_dbg(ql_dbg_mbx, vha, 0x105f,
2243 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2244 rval, mcp->mb[0], mcp->mb[1]);
2245 } else {
2246 /*EMPTY*/
2247 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
2248 "Done %s.\n", __func__);
2251 return rval;
2255 qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2256 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2258 int rval;
2260 struct logio_entry_24xx *lg;
2261 dma_addr_t lg_dma;
2262 uint32_t iop[2];
2263 struct qla_hw_data *ha = vha->hw;
2264 struct req_que *req;
2266 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
2267 "Entered %s.\n", __func__);
2269 if (vha->vp_idx && vha->qpair)
2270 req = vha->qpair->req;
2271 else
2272 req = ha->req_q_map[0];
2274 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2275 if (lg == NULL) {
2276 ql_log(ql_log_warn, vha, 0x1062,
2277 "Failed to allocate login IOCB.\n");
2278 return QLA_MEMORY_ALLOC_FAILED;
2281 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2282 lg->entry_count = 1;
2283 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2284 lg->nport_handle = cpu_to_le16(loop_id);
2285 lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2286 if (opt & BIT_0)
2287 lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2288 if (opt & BIT_1)
2289 lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2290 lg->port_id[0] = al_pa;
2291 lg->port_id[1] = area;
2292 lg->port_id[2] = domain;
2293 lg->vp_index = vha->vp_idx;
2294 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2295 (ha->r_a_tov / 10 * 2) + 2);
2296 if (rval != QLA_SUCCESS) {
2297 ql_dbg(ql_dbg_mbx, vha, 0x1063,
2298 "Failed to issue login IOCB (%x).\n", rval);
2299 } else if (lg->entry_status != 0) {
2300 ql_dbg(ql_dbg_mbx, vha, 0x1064,
2301 "Failed to complete IOCB -- error status (%x).\n",
2302 lg->entry_status);
2303 rval = QLA_FUNCTION_FAILED;
2304 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2305 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2306 iop[1] = le32_to_cpu(lg->io_parameter[1]);
2308 ql_dbg(ql_dbg_mbx, vha, 0x1065,
2309 "Failed to complete IOCB -- completion status (%x) "
2310 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2311 iop[0], iop[1]);
2313 switch (iop[0]) {
2314 case LSC_SCODE_PORTID_USED:
2315 mb[0] = MBS_PORT_ID_USED;
2316 mb[1] = LSW(iop[1]);
2317 break;
2318 case LSC_SCODE_NPORT_USED:
2319 mb[0] = MBS_LOOP_ID_USED;
2320 break;
2321 case LSC_SCODE_NOLINK:
2322 case LSC_SCODE_NOIOCB:
2323 case LSC_SCODE_NOXCB:
2324 case LSC_SCODE_CMD_FAILED:
2325 case LSC_SCODE_NOFABRIC:
2326 case LSC_SCODE_FW_NOT_READY:
2327 case LSC_SCODE_NOT_LOGGED_IN:
2328 case LSC_SCODE_NOPCB:
2329 case LSC_SCODE_ELS_REJECT:
2330 case LSC_SCODE_CMD_PARAM_ERR:
2331 case LSC_SCODE_NONPORT:
2332 case LSC_SCODE_LOGGED_IN:
2333 case LSC_SCODE_NOFLOGI_ACC:
2334 default:
2335 mb[0] = MBS_COMMAND_ERROR;
2336 break;
2338 } else {
2339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
2340 "Done %s.\n", __func__);
2342 iop[0] = le32_to_cpu(lg->io_parameter[0]);
2344 mb[0] = MBS_COMMAND_COMPLETE;
2345 mb[1] = 0;
2346 if (iop[0] & BIT_4) {
2347 if (iop[0] & BIT_8)
2348 mb[1] |= BIT_1;
2349 } else
2350 mb[1] = BIT_0;
2352 /* Passback COS information. */
2353 mb[10] = 0;
2354 if (lg->io_parameter[7] || lg->io_parameter[8])
2355 mb[10] |= BIT_0; /* Class 2. */
2356 if (lg->io_parameter[9] || lg->io_parameter[10])
2357 mb[10] |= BIT_1; /* Class 3. */
2358 if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
2359 mb[10] |= BIT_7; /* Confirmed Completion
2360 * Allowed
2364 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2366 return rval;
2370 * qla2x00_login_fabric
2371 * Issue login fabric port mailbox command.
2373 * Input:
2374 * ha = adapter block pointer.
2375 * loop_id = device loop ID.
2376 * domain = device domain.
2377 * area = device area.
2378 * al_pa = device AL_PA.
2379 * status = pointer for return status.
2380 * opt = command options.
2381 * TARGET_QUEUE_LOCK must be released.
2382 * ADAPTER_STATE_LOCK must be released.
2384 * Returns:
2385 * qla2x00 local function return status code.
2387 * Context:
2388 * Kernel context.
2391 qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2392 uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
2394 int rval;
2395 mbx_cmd_t mc;
2396 mbx_cmd_t *mcp = &mc;
2397 struct qla_hw_data *ha = vha->hw;
2399 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
2400 "Entered %s.\n", __func__);
2402 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
2403 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2404 if (HAS_EXTENDED_IDS(ha)) {
2405 mcp->mb[1] = loop_id;
2406 mcp->mb[10] = opt;
2407 mcp->out_mb |= MBX_10;
2408 } else {
2409 mcp->mb[1] = (loop_id << 8) | opt;
2411 mcp->mb[2] = domain;
2412 mcp->mb[3] = area << 8 | al_pa;
2414 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
2415 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2416 mcp->flags = 0;
2417 rval = qla2x00_mailbox_command(vha, mcp);
2419 /* Return mailbox statuses. */
2420 if (mb != NULL) {
2421 mb[0] = mcp->mb[0];
2422 mb[1] = mcp->mb[1];
2423 mb[2] = mcp->mb[2];
2424 mb[6] = mcp->mb[6];
2425 mb[7] = mcp->mb[7];
2426 /* COS retrieved from Get-Port-Database mailbox command. */
2427 mb[10] = 0;
2430 if (rval != QLA_SUCCESS) {
2431 /* RLU tmp code: need to change main mailbox_command function to
2432 * return ok even when the mailbox completion value is not
2433 * SUCCESS. The caller needs to be responsible to interpret
2434 * the return values of this mailbox command if we're not
2435 * to change too much of the existing code.
2437 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
2438 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
2439 mcp->mb[0] == 0x4006)
2440 rval = QLA_SUCCESS;
2442 /*EMPTY*/
2443 ql_dbg(ql_dbg_mbx, vha, 0x1068,
2444 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
2445 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
2446 } else {
2447 /*EMPTY*/
2448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
2449 "Done %s.\n", __func__);
2452 return rval;
2456 * qla2x00_login_local_device
2457 * Issue login loop port mailbox command.
2459 * Input:
2460 * ha = adapter block pointer.
2461 * loop_id = device loop ID.
2462 * opt = command options.
2464 * Returns:
2465 * Return status code.
2467 * Context:
2468 * Kernel context.
2472 qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
2473 uint16_t *mb_ret, uint8_t opt)
2475 int rval;
2476 mbx_cmd_t mc;
2477 mbx_cmd_t *mcp = &mc;
2478 struct qla_hw_data *ha = vha->hw;
2480 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
2481 "Entered %s.\n", __func__);
2483 if (IS_FWI2_CAPABLE(ha))
2484 return qla24xx_login_fabric(vha, fcport->loop_id,
2485 fcport->d_id.b.domain, fcport->d_id.b.area,
2486 fcport->d_id.b.al_pa, mb_ret, opt);
2488 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
2489 if (HAS_EXTENDED_IDS(ha))
2490 mcp->mb[1] = fcport->loop_id;
2491 else
2492 mcp->mb[1] = fcport->loop_id << 8;
2493 mcp->mb[2] = opt;
2494 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2495 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
2496 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2497 mcp->flags = 0;
2498 rval = qla2x00_mailbox_command(vha, mcp);
2500 /* Return mailbox statuses. */
2501 if (mb_ret != NULL) {
2502 mb_ret[0] = mcp->mb[0];
2503 mb_ret[1] = mcp->mb[1];
2504 mb_ret[6] = mcp->mb[6];
2505 mb_ret[7] = mcp->mb[7];
2508 if (rval != QLA_SUCCESS) {
2509 /* AV tmp code: need to change main mailbox_command function to
2510 * return ok even when the mailbox completion value is not
2511 * SUCCESS. The caller needs to be responsible to interpret
2512 * the return values of this mailbox command if we're not
2513 * to change too much of the existing code.
2515 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
2516 rval = QLA_SUCCESS;
2518 ql_dbg(ql_dbg_mbx, vha, 0x106b,
2519 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
2520 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
2521 } else {
2522 /*EMPTY*/
2523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2524 "Done %s.\n", __func__);
2527 return (rval);
2531 qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2532 uint8_t area, uint8_t al_pa)
2534 int rval;
2535 struct logio_entry_24xx *lg;
2536 dma_addr_t lg_dma;
2537 struct qla_hw_data *ha = vha->hw;
2538 struct req_que *req;
2540 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2541 "Entered %s.\n", __func__);
2543 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2544 if (lg == NULL) {
2545 ql_log(ql_log_warn, vha, 0x106e,
2546 "Failed to allocate logout IOCB.\n");
2547 return QLA_MEMORY_ALLOC_FAILED;
2550 req = vha->req;
2551 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2552 lg->entry_count = 1;
2553 lg->handle = MAKE_HANDLE(req->id, lg->handle);
2554 lg->nport_handle = cpu_to_le16(loop_id);
2555 lg->control_flags =
2556 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
2557 LCF_FREE_NPORT);
2558 lg->port_id[0] = al_pa;
2559 lg->port_id[1] = area;
2560 lg->port_id[2] = domain;
2561 lg->vp_index = vha->vp_idx;
2562 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
2563 (ha->r_a_tov / 10 * 2) + 2);
2564 if (rval != QLA_SUCCESS) {
2565 ql_dbg(ql_dbg_mbx, vha, 0x106f,
2566 "Failed to issue logout IOCB (%x).\n", rval);
2567 } else if (lg->entry_status != 0) {
2568 ql_dbg(ql_dbg_mbx, vha, 0x1070,
2569 "Failed to complete IOCB -- error status (%x).\n",
2570 lg->entry_status);
2571 rval = QLA_FUNCTION_FAILED;
2572 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2573 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2574 "Failed to complete IOCB -- completion status (%x) "
2575 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
2576 le32_to_cpu(lg->io_parameter[0]),
2577 le32_to_cpu(lg->io_parameter[1]));
2578 } else {
2579 /*EMPTY*/
2580 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2581 "Done %s.\n", __func__);
2584 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2586 return rval;
2590 * qla2x00_fabric_logout
2591 * Issue logout fabric port mailbox command.
2593 * Input:
2594 * ha = adapter block pointer.
2595 * loop_id = device loop ID.
2596 * TARGET_QUEUE_LOCK must be released.
2597 * ADAPTER_STATE_LOCK must be released.
2599 * Returns:
2600 * qla2x00 local function return status code.
2602 * Context:
2603 * Kernel context.
2606 qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2607 uint8_t area, uint8_t al_pa)
2609 int rval;
2610 mbx_cmd_t mc;
2611 mbx_cmd_t *mcp = &mc;
2613 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2614 "Entered %s.\n", __func__);
2616 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
2617 mcp->out_mb = MBX_1|MBX_0;
2618 if (HAS_EXTENDED_IDS(vha->hw)) {
2619 mcp->mb[1] = loop_id;
2620 mcp->mb[10] = 0;
2621 mcp->out_mb |= MBX_10;
2622 } else {
2623 mcp->mb[1] = loop_id << 8;
2626 mcp->in_mb = MBX_1|MBX_0;
2627 mcp->tov = MBX_TOV_SECONDS;
2628 mcp->flags = 0;
2629 rval = qla2x00_mailbox_command(vha, mcp);
2631 if (rval != QLA_SUCCESS) {
2632 /*EMPTY*/
2633 ql_dbg(ql_dbg_mbx, vha, 0x1074,
2634 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2635 } else {
2636 /*EMPTY*/
2637 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2638 "Done %s.\n", __func__);
2641 return rval;
2645 * qla2x00_full_login_lip
2646 * Issue full login LIP mailbox command.
2648 * Input:
2649 * ha = adapter block pointer.
2650 * TARGET_QUEUE_LOCK must be released.
2651 * ADAPTER_STATE_LOCK must be released.
2653 * Returns:
2654 * qla2x00 local function return status code.
2656 * Context:
2657 * Kernel context.
2660 qla2x00_full_login_lip(scsi_qla_host_t *vha)
2662 int rval;
2663 mbx_cmd_t mc;
2664 mbx_cmd_t *mcp = &mc;
2666 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2667 "Entered %s.\n", __func__);
2669 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2670 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
2671 mcp->mb[2] = 0;
2672 mcp->mb[3] = 0;
2673 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
2674 mcp->in_mb = MBX_0;
2675 mcp->tov = MBX_TOV_SECONDS;
2676 mcp->flags = 0;
2677 rval = qla2x00_mailbox_command(vha, mcp);
2679 if (rval != QLA_SUCCESS) {
2680 /*EMPTY*/
2681 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2682 } else {
2683 /*EMPTY*/
2684 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2685 "Done %s.\n", __func__);
2688 return rval;
2692 * qla2x00_get_id_list
2694 * Input:
2695 * ha = adapter block pointer.
2697 * Returns:
2698 * qla2x00 local function return status code.
2700 * Context:
2701 * Kernel context.
2704 qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2705 uint16_t *entries)
2707 int rval;
2708 mbx_cmd_t mc;
2709 mbx_cmd_t *mcp = &mc;
2711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2712 "Entered %s.\n", __func__);
2714 if (id_list == NULL)
2715 return QLA_FUNCTION_FAILED;
2717 mcp->mb[0] = MBC_GET_ID_LIST;
2718 mcp->out_mb = MBX_0;
2719 if (IS_FWI2_CAPABLE(vha->hw)) {
2720 mcp->mb[2] = MSW(id_list_dma);
2721 mcp->mb[3] = LSW(id_list_dma);
2722 mcp->mb[6] = MSW(MSD(id_list_dma));
2723 mcp->mb[7] = LSW(MSD(id_list_dma));
2724 mcp->mb[8] = 0;
2725 mcp->mb[9] = vha->vp_idx;
2726 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2727 } else {
2728 mcp->mb[1] = MSW(id_list_dma);
2729 mcp->mb[2] = LSW(id_list_dma);
2730 mcp->mb[3] = MSW(MSD(id_list_dma));
2731 mcp->mb[6] = LSW(MSD(id_list_dma));
2732 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2734 mcp->in_mb = MBX_1|MBX_0;
2735 mcp->tov = MBX_TOV_SECONDS;
2736 mcp->flags = 0;
2737 rval = qla2x00_mailbox_command(vha, mcp);
2739 if (rval != QLA_SUCCESS) {
2740 /*EMPTY*/
2741 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2742 } else {
2743 *entries = mcp->mb[1];
2744 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2745 "Done %s.\n", __func__);
2748 return rval;
2752 * qla2x00_get_resource_cnts
2753 * Get current firmware resource counts.
2755 * Input:
2756 * ha = adapter block pointer.
2758 * Returns:
2759 * qla2x00 local function return status code.
2761 * Context:
2762 * Kernel context.
2765 qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
2767 struct qla_hw_data *ha = vha->hw;
2768 int rval;
2769 mbx_cmd_t mc;
2770 mbx_cmd_t *mcp = &mc;
2772 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2773 "Entered %s.\n", __func__);
2775 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2776 mcp->out_mb = MBX_0;
2777 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2778 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
2779 mcp->in_mb |= MBX_12;
2780 mcp->tov = MBX_TOV_SECONDS;
2781 mcp->flags = 0;
2782 rval = qla2x00_mailbox_command(vha, mcp);
2784 if (rval != QLA_SUCCESS) {
2785 /*EMPTY*/
2786 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2787 "Failed mb[0]=%x.\n", mcp->mb[0]);
2788 } else {
2789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2790 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2791 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2792 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2793 mcp->mb[11], mcp->mb[12]);
2795 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
2796 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
2797 ha->cur_fw_xcb_count = mcp->mb[3];
2798 ha->orig_fw_xcb_count = mcp->mb[6];
2799 ha->cur_fw_iocb_count = mcp->mb[7];
2800 ha->orig_fw_iocb_count = mcp->mb[10];
2801 if (ha->flags.npiv_supported)
2802 ha->max_npiv_vports = mcp->mb[11];
2803 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2804 ha->fw_max_fcf_count = mcp->mb[12];
2807 return (rval);
2811 * qla2x00_get_fcal_position_map
2812 * Get FCAL (LILP) position map using mailbox command
2814 * Input:
2815 * ha = adapter state pointer.
2816 * pos_map = buffer pointer (can be NULL).
2818 * Returns:
2819 * qla2x00 local function return status code.
2821 * Context:
2822 * Kernel context.
2825 qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2827 int rval;
2828 mbx_cmd_t mc;
2829 mbx_cmd_t *mcp = &mc;
2830 char *pmap;
2831 dma_addr_t pmap_dma;
2832 struct qla_hw_data *ha = vha->hw;
2834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2835 "Entered %s.\n", __func__);
2837 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2838 if (pmap == NULL) {
2839 ql_log(ql_log_warn, vha, 0x1080,
2840 "Memory alloc failed.\n");
2841 return QLA_MEMORY_ALLOC_FAILED;
2844 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2845 mcp->mb[2] = MSW(pmap_dma);
2846 mcp->mb[3] = LSW(pmap_dma);
2847 mcp->mb[6] = MSW(MSD(pmap_dma));
2848 mcp->mb[7] = LSW(MSD(pmap_dma));
2849 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2850 mcp->in_mb = MBX_1|MBX_0;
2851 mcp->buf_size = FCAL_MAP_SIZE;
2852 mcp->flags = MBX_DMA_IN;
2853 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2854 rval = qla2x00_mailbox_command(vha, mcp);
2856 if (rval == QLA_SUCCESS) {
2857 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2858 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2859 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2860 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2861 pmap, pmap[0] + 1);
2863 if (pos_map)
2864 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2866 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2868 if (rval != QLA_SUCCESS) {
2869 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2870 } else {
2871 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2872 "Done %s.\n", __func__);
2875 return rval;
2879 * qla2x00_get_link_status
2881 * Input:
2882 * ha = adapter block pointer.
2883 * loop_id = device loop ID.
2884 * ret_buf = pointer to link status return buffer.
2886 * Returns:
2887 * 0 = success.
2888 * BIT_0 = mem alloc error.
2889 * BIT_1 = mailbox error.
2892 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2893 struct link_statistics *stats, dma_addr_t stats_dma)
2895 int rval;
2896 mbx_cmd_t mc;
2897 mbx_cmd_t *mcp = &mc;
2898 uint32_t *iter = (void *)stats;
2899 ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter);
2900 struct qla_hw_data *ha = vha->hw;
2902 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2903 "Entered %s.\n", __func__);
2905 mcp->mb[0] = MBC_GET_LINK_STATUS;
2906 mcp->mb[2] = MSW(LSD(stats_dma));
2907 mcp->mb[3] = LSW(LSD(stats_dma));
2908 mcp->mb[6] = MSW(MSD(stats_dma));
2909 mcp->mb[7] = LSW(MSD(stats_dma));
2910 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2911 mcp->in_mb = MBX_0;
2912 if (IS_FWI2_CAPABLE(ha)) {
2913 mcp->mb[1] = loop_id;
2914 mcp->mb[4] = 0;
2915 mcp->mb[10] = 0;
2916 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2917 mcp->in_mb |= MBX_1;
2918 } else if (HAS_EXTENDED_IDS(ha)) {
2919 mcp->mb[1] = loop_id;
2920 mcp->mb[10] = 0;
2921 mcp->out_mb |= MBX_10|MBX_1;
2922 } else {
2923 mcp->mb[1] = loop_id << 8;
2924 mcp->out_mb |= MBX_1;
2926 mcp->tov = MBX_TOV_SECONDS;
2927 mcp->flags = IOCTL_CMD;
2928 rval = qla2x00_mailbox_command(vha, mcp);
2930 if (rval == QLA_SUCCESS) {
2931 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2932 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2933 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2934 rval = QLA_FUNCTION_FAILED;
2935 } else {
2936 /* Re-endianize - firmware data is le32. */
2937 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2938 "Done %s.\n", __func__);
2939 for ( ; dwords--; iter++)
2940 le32_to_cpus(iter);
2942 } else {
2943 /* Failed. */
2944 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2947 return rval;
2951 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2952 dma_addr_t stats_dma, uint16_t options)
2954 int rval;
2955 mbx_cmd_t mc;
2956 mbx_cmd_t *mcp = &mc;
2957 uint32_t *iter, dwords;
2959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2960 "Entered %s.\n", __func__);
2962 memset(&mc, 0, sizeof(mc));
2963 mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
2964 mc.mb[2] = MSW(stats_dma);
2965 mc.mb[3] = LSW(stats_dma);
2966 mc.mb[6] = MSW(MSD(stats_dma));
2967 mc.mb[7] = LSW(MSD(stats_dma));
2968 mc.mb[8] = sizeof(struct link_statistics) / 4;
2969 mc.mb[9] = cpu_to_le16(vha->vp_idx);
2970 mc.mb[10] = cpu_to_le16(options);
2972 rval = qla24xx_send_mb_cmd(vha, &mc);
2974 if (rval == QLA_SUCCESS) {
2975 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2976 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2977 "Failed mb[0]=%x.\n", mcp->mb[0]);
2978 rval = QLA_FUNCTION_FAILED;
2979 } else {
2980 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2981 "Done %s.\n", __func__);
2982 /* Re-endianize - firmware data is le32. */
2983 dwords = sizeof(struct link_statistics) / 4;
2984 iter = &stats->link_fail_cnt;
2985 for ( ; dwords--; iter++)
2986 le32_to_cpus(iter);
2988 } else {
2989 /* Failed. */
2990 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2993 return rval;
2997 qla24xx_abort_command(srb_t *sp)
2999 int rval;
3000 unsigned long flags = 0;
3002 struct abort_entry_24xx *abt;
3003 dma_addr_t abt_dma;
3004 uint32_t handle;
3005 fc_port_t *fcport = sp->fcport;
3006 struct scsi_qla_host *vha = fcport->vha;
3007 struct qla_hw_data *ha = vha->hw;
3008 struct req_que *req = vha->req;
3010 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
3011 "Entered %s.\n", __func__);
3013 if (vha->flags.qpairs_available && sp->qpair)
3014 req = sp->qpair->req;
3016 if (ql2xasynctmfenable)
3017 return qla24xx_async_abort_command(sp);
3019 spin_lock_irqsave(&ha->hardware_lock, flags);
3020 for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
3021 if (req->outstanding_cmds[handle] == sp)
3022 break;
3024 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3025 if (handle == req->num_outstanding_cmds) {
3026 /* Command not found. */
3027 return QLA_FUNCTION_FAILED;
3030 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3031 if (abt == NULL) {
3032 ql_log(ql_log_warn, vha, 0x108d,
3033 "Failed to allocate abort IOCB.\n");
3034 return QLA_MEMORY_ALLOC_FAILED;
3037 abt->entry_type = ABORT_IOCB_TYPE;
3038 abt->entry_count = 1;
3039 abt->handle = MAKE_HANDLE(req->id, abt->handle);
3040 abt->nport_handle = cpu_to_le16(fcport->loop_id);
3041 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
3042 abt->port_id[0] = fcport->d_id.b.al_pa;
3043 abt->port_id[1] = fcport->d_id.b.area;
3044 abt->port_id[2] = fcport->d_id.b.domain;
3045 abt->vp_index = fcport->vha->vp_idx;
3047 abt->req_que_no = cpu_to_le16(req->id);
3049 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
3050 if (rval != QLA_SUCCESS) {
3051 ql_dbg(ql_dbg_mbx, vha, 0x108e,
3052 "Failed to issue IOCB (%x).\n", rval);
3053 } else if (abt->entry_status != 0) {
3054 ql_dbg(ql_dbg_mbx, vha, 0x108f,
3055 "Failed to complete IOCB -- error status (%x).\n",
3056 abt->entry_status);
3057 rval = QLA_FUNCTION_FAILED;
3058 } else if (abt->nport_handle != cpu_to_le16(0)) {
3059 ql_dbg(ql_dbg_mbx, vha, 0x1090,
3060 "Failed to complete IOCB -- completion status (%x).\n",
3061 le16_to_cpu(abt->nport_handle));
3062 if (abt->nport_handle == CS_IOCB_ERROR)
3063 rval = QLA_FUNCTION_PARAMETER_ERROR;
3064 else
3065 rval = QLA_FUNCTION_FAILED;
3066 } else {
3067 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
3068 "Done %s.\n", __func__);
3071 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3073 return rval;
3076 struct tsk_mgmt_cmd {
3077 union {
3078 struct tsk_mgmt_entry tsk;
3079 struct sts_entry_24xx sts;
3080 } p;
3083 static int
3084 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
3085 uint64_t l, int tag)
3087 int rval, rval2;
3088 struct tsk_mgmt_cmd *tsk;
3089 struct sts_entry_24xx *sts;
3090 dma_addr_t tsk_dma;
3091 scsi_qla_host_t *vha;
3092 struct qla_hw_data *ha;
3093 struct req_que *req;
3094 struct rsp_que *rsp;
3095 struct qla_qpair *qpair;
3097 vha = fcport->vha;
3098 ha = vha->hw;
3099 req = vha->req;
3101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
3102 "Entered %s.\n", __func__);
3104 if (vha->vp_idx && vha->qpair) {
3105 /* NPIV port */
3106 qpair = vha->qpair;
3107 rsp = qpair->rsp;
3108 req = qpair->req;
3109 } else {
3110 rsp = req->rsp;
3113 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3114 if (tsk == NULL) {
3115 ql_log(ql_log_warn, vha, 0x1093,
3116 "Failed to allocate task management IOCB.\n");
3117 return QLA_MEMORY_ALLOC_FAILED;
3120 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
3121 tsk->p.tsk.entry_count = 1;
3122 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
3123 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
3124 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3125 tsk->p.tsk.control_flags = cpu_to_le32(type);
3126 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
3127 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
3128 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
3129 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
3130 if (type == TCF_LUN_RESET) {
3131 int_to_scsilun(l, &tsk->p.tsk.lun);
3132 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
3133 sizeof(tsk->p.tsk.lun));
3136 sts = &tsk->p.sts;
3137 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
3138 if (rval != QLA_SUCCESS) {
3139 ql_dbg(ql_dbg_mbx, vha, 0x1094,
3140 "Failed to issue %s reset IOCB (%x).\n", name, rval);
3141 } else if (sts->entry_status != 0) {
3142 ql_dbg(ql_dbg_mbx, vha, 0x1095,
3143 "Failed to complete IOCB -- error status (%x).\n",
3144 sts->entry_status);
3145 rval = QLA_FUNCTION_FAILED;
3146 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
3147 ql_dbg(ql_dbg_mbx, vha, 0x1096,
3148 "Failed to complete IOCB -- completion status (%x).\n",
3149 le16_to_cpu(sts->comp_status));
3150 rval = QLA_FUNCTION_FAILED;
3151 } else if (le16_to_cpu(sts->scsi_status) &
3152 SS_RESPONSE_INFO_LEN_VALID) {
3153 if (le32_to_cpu(sts->rsp_data_len) < 4) {
3154 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
3155 "Ignoring inconsistent data length -- not enough "
3156 "response info (%d).\n",
3157 le32_to_cpu(sts->rsp_data_len));
3158 } else if (sts->data[3]) {
3159 ql_dbg(ql_dbg_mbx, vha, 0x1098,
3160 "Failed to complete IOCB -- response (%x).\n",
3161 sts->data[3]);
3162 rval = QLA_FUNCTION_FAILED;
3166 /* Issue marker IOCB. */
3167 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
3168 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
3169 if (rval2 != QLA_SUCCESS) {
3170 ql_dbg(ql_dbg_mbx, vha, 0x1099,
3171 "Failed to issue marker IOCB (%x).\n", rval2);
3172 } else {
3173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
3174 "Done %s.\n", __func__);
3177 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3179 return rval;
3183 qla24xx_abort_target(struct fc_port *fcport, uint64_t l, int tag)
3185 struct qla_hw_data *ha = fcport->vha->hw;
3187 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3188 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
3190 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
3194 qla24xx_lun_reset(struct fc_port *fcport, uint64_t l, int tag)
3196 struct qla_hw_data *ha = fcport->vha->hw;
3198 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3199 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
3201 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
3205 qla2x00_system_error(scsi_qla_host_t *vha)
3207 int rval;
3208 mbx_cmd_t mc;
3209 mbx_cmd_t *mcp = &mc;
3210 struct qla_hw_data *ha = vha->hw;
3212 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3213 return QLA_FUNCTION_FAILED;
3215 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
3216 "Entered %s.\n", __func__);
3218 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
3219 mcp->out_mb = MBX_0;
3220 mcp->in_mb = MBX_0;
3221 mcp->tov = 5;
3222 mcp->flags = 0;
3223 rval = qla2x00_mailbox_command(vha, mcp);
3225 if (rval != QLA_SUCCESS) {
3226 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
3227 } else {
3228 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
3229 "Done %s.\n", __func__);
3232 return rval;
3236 qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
3238 int rval;
3239 mbx_cmd_t mc;
3240 mbx_cmd_t *mcp = &mc;
3242 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3243 !IS_QLA27XX(vha->hw))
3244 return QLA_FUNCTION_FAILED;
3246 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
3247 "Entered %s.\n", __func__);
3249 mcp->mb[0] = MBC_WRITE_SERDES;
3250 mcp->mb[1] = addr;
3251 if (IS_QLA2031(vha->hw))
3252 mcp->mb[2] = data & 0xff;
3253 else
3254 mcp->mb[2] = data;
3256 mcp->mb[3] = 0;
3257 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
3258 mcp->in_mb = MBX_0;
3259 mcp->tov = MBX_TOV_SECONDS;
3260 mcp->flags = 0;
3261 rval = qla2x00_mailbox_command(vha, mcp);
3263 if (rval != QLA_SUCCESS) {
3264 ql_dbg(ql_dbg_mbx, vha, 0x1183,
3265 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3266 } else {
3267 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184,
3268 "Done %s.\n", __func__);
3271 return rval;
3275 qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
3277 int rval;
3278 mbx_cmd_t mc;
3279 mbx_cmd_t *mcp = &mc;
3281 if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
3282 !IS_QLA27XX(vha->hw))
3283 return QLA_FUNCTION_FAILED;
3285 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
3286 "Entered %s.\n", __func__);
3288 mcp->mb[0] = MBC_READ_SERDES;
3289 mcp->mb[1] = addr;
3290 mcp->mb[3] = 0;
3291 mcp->out_mb = MBX_3|MBX_1|MBX_0;
3292 mcp->in_mb = MBX_1|MBX_0;
3293 mcp->tov = MBX_TOV_SECONDS;
3294 mcp->flags = 0;
3295 rval = qla2x00_mailbox_command(vha, mcp);
3297 if (IS_QLA2031(vha->hw))
3298 *data = mcp->mb[1] & 0xff;
3299 else
3300 *data = mcp->mb[1];
3302 if (rval != QLA_SUCCESS) {
3303 ql_dbg(ql_dbg_mbx, vha, 0x1186,
3304 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3305 } else {
3306 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187,
3307 "Done %s.\n", __func__);
3310 return rval;
3314 qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
3316 int rval;
3317 mbx_cmd_t mc;
3318 mbx_cmd_t *mcp = &mc;
3320 if (!IS_QLA8044(vha->hw))
3321 return QLA_FUNCTION_FAILED;
3323 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x11a0,
3324 "Entered %s.\n", __func__);
3326 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3327 mcp->mb[1] = HCS_WRITE_SERDES;
3328 mcp->mb[3] = LSW(addr);
3329 mcp->mb[4] = MSW(addr);
3330 mcp->mb[5] = LSW(data);
3331 mcp->mb[6] = MSW(data);
3332 mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
3333 mcp->in_mb = MBX_0;
3334 mcp->tov = MBX_TOV_SECONDS;
3335 mcp->flags = 0;
3336 rval = qla2x00_mailbox_command(vha, mcp);
3338 if (rval != QLA_SUCCESS) {
3339 ql_dbg(ql_dbg_mbx, vha, 0x11a1,
3340 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3341 } else {
3342 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
3343 "Done %s.\n", __func__);
3346 return rval;
3350 qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
3352 int rval;
3353 mbx_cmd_t mc;
3354 mbx_cmd_t *mcp = &mc;
3356 if (!IS_QLA8044(vha->hw))
3357 return QLA_FUNCTION_FAILED;
3359 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
3360 "Entered %s.\n", __func__);
3362 mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
3363 mcp->mb[1] = HCS_READ_SERDES;
3364 mcp->mb[3] = LSW(addr);
3365 mcp->mb[4] = MSW(addr);
3366 mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
3367 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3368 mcp->tov = MBX_TOV_SECONDS;
3369 mcp->flags = 0;
3370 rval = qla2x00_mailbox_command(vha, mcp);
3372 *data = mcp->mb[2] << 16 | mcp->mb[1];
3374 if (rval != QLA_SUCCESS) {
3375 ql_dbg(ql_dbg_mbx, vha, 0x118a,
3376 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3377 } else {
3378 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
3379 "Done %s.\n", __func__);
3382 return rval;
3386 * qla2x00_set_serdes_params() -
3387 * @ha: HA context
3389 * Returns
3392 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
3393 uint16_t sw_em_2g, uint16_t sw_em_4g)
3395 int rval;
3396 mbx_cmd_t mc;
3397 mbx_cmd_t *mcp = &mc;
3399 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
3400 "Entered %s.\n", __func__);
3402 mcp->mb[0] = MBC_SERDES_PARAMS;
3403 mcp->mb[1] = BIT_0;
3404 mcp->mb[2] = sw_em_1g | BIT_15;
3405 mcp->mb[3] = sw_em_2g | BIT_15;
3406 mcp->mb[4] = sw_em_4g | BIT_15;
3407 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3408 mcp->in_mb = MBX_0;
3409 mcp->tov = MBX_TOV_SECONDS;
3410 mcp->flags = 0;
3411 rval = qla2x00_mailbox_command(vha, mcp);
3413 if (rval != QLA_SUCCESS) {
3414 /*EMPTY*/
3415 ql_dbg(ql_dbg_mbx, vha, 0x109f,
3416 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3417 } else {
3418 /*EMPTY*/
3419 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
3420 "Done %s.\n", __func__);
3423 return rval;
3427 qla2x00_stop_firmware(scsi_qla_host_t *vha)
3429 int rval;
3430 mbx_cmd_t mc;
3431 mbx_cmd_t *mcp = &mc;
3433 if (!IS_FWI2_CAPABLE(vha->hw))
3434 return QLA_FUNCTION_FAILED;
3436 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
3437 "Entered %s.\n", __func__);
3439 mcp->mb[0] = MBC_STOP_FIRMWARE;
3440 mcp->mb[1] = 0;
3441 mcp->out_mb = MBX_1|MBX_0;
3442 mcp->in_mb = MBX_0;
3443 mcp->tov = 5;
3444 mcp->flags = 0;
3445 rval = qla2x00_mailbox_command(vha, mcp);
3447 if (rval != QLA_SUCCESS) {
3448 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
3449 if (mcp->mb[0] == MBS_INVALID_COMMAND)
3450 rval = QLA_INVALID_COMMAND;
3451 } else {
3452 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
3453 "Done %s.\n", __func__);
3456 return rval;
3460 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
3461 uint16_t buffers)
3463 int rval;
3464 mbx_cmd_t mc;
3465 mbx_cmd_t *mcp = &mc;
3467 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
3468 "Entered %s.\n", __func__);
3470 if (!IS_FWI2_CAPABLE(vha->hw))
3471 return QLA_FUNCTION_FAILED;
3473 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3474 return QLA_FUNCTION_FAILED;
3476 mcp->mb[0] = MBC_TRACE_CONTROL;
3477 mcp->mb[1] = TC_EFT_ENABLE;
3478 mcp->mb[2] = LSW(eft_dma);
3479 mcp->mb[3] = MSW(eft_dma);
3480 mcp->mb[4] = LSW(MSD(eft_dma));
3481 mcp->mb[5] = MSW(MSD(eft_dma));
3482 mcp->mb[6] = buffers;
3483 mcp->mb[7] = TC_AEN_DISABLE;
3484 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3485 mcp->in_mb = MBX_1|MBX_0;
3486 mcp->tov = MBX_TOV_SECONDS;
3487 mcp->flags = 0;
3488 rval = qla2x00_mailbox_command(vha, mcp);
3489 if (rval != QLA_SUCCESS) {
3490 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
3491 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3492 rval, mcp->mb[0], mcp->mb[1]);
3493 } else {
3494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
3495 "Done %s.\n", __func__);
3498 return rval;
3502 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
3504 int rval;
3505 mbx_cmd_t mc;
3506 mbx_cmd_t *mcp = &mc;
3508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
3509 "Entered %s.\n", __func__);
3511 if (!IS_FWI2_CAPABLE(vha->hw))
3512 return QLA_FUNCTION_FAILED;
3514 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3515 return QLA_FUNCTION_FAILED;
3517 mcp->mb[0] = MBC_TRACE_CONTROL;
3518 mcp->mb[1] = TC_EFT_DISABLE;
3519 mcp->out_mb = MBX_1|MBX_0;
3520 mcp->in_mb = MBX_1|MBX_0;
3521 mcp->tov = MBX_TOV_SECONDS;
3522 mcp->flags = 0;
3523 rval = qla2x00_mailbox_command(vha, mcp);
3524 if (rval != QLA_SUCCESS) {
3525 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
3526 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3527 rval, mcp->mb[0], mcp->mb[1]);
3528 } else {
3529 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
3530 "Done %s.\n", __func__);
3533 return rval;
3537 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
3538 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
3540 int rval;
3541 mbx_cmd_t mc;
3542 mbx_cmd_t *mcp = &mc;
3544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
3545 "Entered %s.\n", __func__);
3547 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
3548 !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
3549 return QLA_FUNCTION_FAILED;
3551 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3552 return QLA_FUNCTION_FAILED;
3554 mcp->mb[0] = MBC_TRACE_CONTROL;
3555 mcp->mb[1] = TC_FCE_ENABLE;
3556 mcp->mb[2] = LSW(fce_dma);
3557 mcp->mb[3] = MSW(fce_dma);
3558 mcp->mb[4] = LSW(MSD(fce_dma));
3559 mcp->mb[5] = MSW(MSD(fce_dma));
3560 mcp->mb[6] = buffers;
3561 mcp->mb[7] = TC_AEN_DISABLE;
3562 mcp->mb[8] = 0;
3563 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
3564 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
3565 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3566 MBX_1|MBX_0;
3567 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3568 mcp->tov = MBX_TOV_SECONDS;
3569 mcp->flags = 0;
3570 rval = qla2x00_mailbox_command(vha, mcp);
3571 if (rval != QLA_SUCCESS) {
3572 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
3573 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3574 rval, mcp->mb[0], mcp->mb[1]);
3575 } else {
3576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
3577 "Done %s.\n", __func__);
3579 if (mb)
3580 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
3581 if (dwords)
3582 *dwords = buffers;
3585 return rval;
3589 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
3591 int rval;
3592 mbx_cmd_t mc;
3593 mbx_cmd_t *mcp = &mc;
3595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
3596 "Entered %s.\n", __func__);
3598 if (!IS_FWI2_CAPABLE(vha->hw))
3599 return QLA_FUNCTION_FAILED;
3601 if (unlikely(pci_channel_offline(vha->hw->pdev)))
3602 return QLA_FUNCTION_FAILED;
3604 mcp->mb[0] = MBC_TRACE_CONTROL;
3605 mcp->mb[1] = TC_FCE_DISABLE;
3606 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
3607 mcp->out_mb = MBX_2|MBX_1|MBX_0;
3608 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
3609 MBX_1|MBX_0;
3610 mcp->tov = MBX_TOV_SECONDS;
3611 mcp->flags = 0;
3612 rval = qla2x00_mailbox_command(vha, mcp);
3613 if (rval != QLA_SUCCESS) {
3614 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
3615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3616 rval, mcp->mb[0], mcp->mb[1]);
3617 } else {
3618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
3619 "Done %s.\n", __func__);
3621 if (wr)
3622 *wr = (uint64_t) mcp->mb[5] << 48 |
3623 (uint64_t) mcp->mb[4] << 32 |
3624 (uint64_t) mcp->mb[3] << 16 |
3625 (uint64_t) mcp->mb[2];
3626 if (rd)
3627 *rd = (uint64_t) mcp->mb[9] << 48 |
3628 (uint64_t) mcp->mb[8] << 32 |
3629 (uint64_t) mcp->mb[7] << 16 |
3630 (uint64_t) mcp->mb[6];
3633 return rval;
3637 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3638 uint16_t *port_speed, uint16_t *mb)
3640 int rval;
3641 mbx_cmd_t mc;
3642 mbx_cmd_t *mcp = &mc;
3644 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3645 "Entered %s.\n", __func__);
3647 if (!IS_IIDMA_CAPABLE(vha->hw))
3648 return QLA_FUNCTION_FAILED;
3650 mcp->mb[0] = MBC_PORT_PARAMS;
3651 mcp->mb[1] = loop_id;
3652 mcp->mb[2] = mcp->mb[3] = 0;
3653 mcp->mb[9] = vha->vp_idx;
3654 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3655 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3656 mcp->tov = MBX_TOV_SECONDS;
3657 mcp->flags = 0;
3658 rval = qla2x00_mailbox_command(vha, mcp);
3660 /* Return mailbox statuses. */
3661 if (mb != NULL) {
3662 mb[0] = mcp->mb[0];
3663 mb[1] = mcp->mb[1];
3664 mb[3] = mcp->mb[3];
3667 if (rval != QLA_SUCCESS) {
3668 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
3669 } else {
3670 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3671 "Done %s.\n", __func__);
3672 if (port_speed)
3673 *port_speed = mcp->mb[3];
3676 return rval;
3680 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
3681 uint16_t port_speed, uint16_t *mb)
3683 int rval;
3684 mbx_cmd_t mc;
3685 mbx_cmd_t *mcp = &mc;
3687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3688 "Entered %s.\n", __func__);
3690 if (!IS_IIDMA_CAPABLE(vha->hw))
3691 return QLA_FUNCTION_FAILED;
3693 mcp->mb[0] = MBC_PORT_PARAMS;
3694 mcp->mb[1] = loop_id;
3695 mcp->mb[2] = BIT_0;
3696 if (IS_CNA_CAPABLE(vha->hw))
3697 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
3698 else
3699 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
3700 mcp->mb[9] = vha->vp_idx;
3701 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
3702 mcp->in_mb = MBX_3|MBX_1|MBX_0;
3703 mcp->tov = MBX_TOV_SECONDS;
3704 mcp->flags = 0;
3705 rval = qla2x00_mailbox_command(vha, mcp);
3707 /* Return mailbox statuses. */
3708 if (mb != NULL) {
3709 mb[0] = mcp->mb[0];
3710 mb[1] = mcp->mb[1];
3711 mb[3] = mcp->mb[3];
3714 if (rval != QLA_SUCCESS) {
3715 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3716 "Failed=%x.\n", rval);
3717 } else {
3718 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3719 "Done %s.\n", __func__);
3722 return rval;
3725 void
3726 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
3727 struct vp_rpt_id_entry_24xx *rptid_entry)
3729 struct qla_hw_data *ha = vha->hw;
3730 scsi_qla_host_t *vp = NULL;
3731 unsigned long flags;
3732 int found;
3733 port_id_t id;
3735 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3736 "Entered %s.\n", __func__);
3738 if (rptid_entry->entry_status != 0)
3739 return;
3741 id.b.domain = rptid_entry->port_id[2];
3742 id.b.area = rptid_entry->port_id[1];
3743 id.b.al_pa = rptid_entry->port_id[0];
3744 id.b.rsvd_1 = 0;
3746 if (rptid_entry->format == 0) {
3747 /* loop */
3748 ql_dbg(ql_dbg_async, vha, 0x10b7,
3749 "Format 0 : Number of VPs setup %d, number of "
3750 "VPs acquired %d.\n", rptid_entry->vp_setup,
3751 rptid_entry->vp_acquired);
3752 ql_dbg(ql_dbg_async, vha, 0x10b8,
3753 "Primary port id %02x%02x%02x.\n",
3754 rptid_entry->port_id[2], rptid_entry->port_id[1],
3755 rptid_entry->port_id[0]);
3757 qlt_update_host_map(vha, id);
3759 } else if (rptid_entry->format == 1) {
3760 /* fabric */
3761 ql_dbg(ql_dbg_async, vha, 0x10b9,
3762 "Format 1: VP[%d] enabled - status %d - with "
3763 "port id %02x%02x%02x.\n", rptid_entry->vp_idx,
3764 rptid_entry->vp_status,
3765 rptid_entry->port_id[2], rptid_entry->port_id[1],
3766 rptid_entry->port_id[0]);
3767 ql_dbg(ql_dbg_async, vha, 0x5075,
3768 "Format 1: Remote WWPN %8phC.\n",
3769 rptid_entry->u.f1.port_name);
3771 ql_dbg(ql_dbg_async, vha, 0x5075,
3772 "Format 1: WWPN %8phC.\n",
3773 vha->port_name);
3775 /* N2N. direct connect */
3776 if (IS_QLA27XX(ha) &&
3777 ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) {
3778 /* if our portname is higher then initiate N2N login */
3779 if (wwn_to_u64(vha->port_name) >
3780 wwn_to_u64(rptid_entry->u.f1.port_name)) {
3781 // ??? qlt_update_host_map(vha, id);
3782 vha->n2n_id = 0x1;
3783 ql_dbg(ql_dbg_async, vha, 0x5075,
3784 "Format 1: Setting n2n_update_needed for id %d\n",
3785 vha->n2n_id);
3786 } else {
3787 ql_dbg(ql_dbg_async, vha, 0x5075,
3788 "Format 1: Remote login - Waiting for WWPN %8phC.\n",
3789 rptid_entry->u.f1.port_name);
3792 memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name,
3793 WWN_SIZE);
3794 set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
3795 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3796 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3797 return;
3800 /* buffer to buffer credit flag */
3801 vha->flags.bbcr_enable = (rptid_entry->u.f1.bbcr & 0xf) != 0;
3803 if (rptid_entry->vp_idx == 0) {
3804 if (rptid_entry->vp_status == VP_STAT_COMPL) {
3805 /* FA-WWN is only for physical port */
3806 if (qla_ini_mode_enabled(vha) &&
3807 ha->flags.fawwpn_enabled &&
3808 (rptid_entry->u.f1.flags &
3809 BIT_6)) {
3810 memcpy(vha->port_name,
3811 rptid_entry->u.f1.port_name,
3812 WWN_SIZE);
3815 qlt_update_host_map(vha, id);
3818 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
3819 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
3820 } else {
3821 if (rptid_entry->vp_status != VP_STAT_COMPL &&
3822 rptid_entry->vp_status != VP_STAT_ID_CHG) {
3823 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
3824 "Could not acquire ID for VP[%d].\n",
3825 rptid_entry->vp_idx);
3826 return;
3829 found = 0;
3830 spin_lock_irqsave(&ha->vport_slock, flags);
3831 list_for_each_entry(vp, &ha->vp_list, list) {
3832 if (rptid_entry->vp_idx == vp->vp_idx) {
3833 found = 1;
3834 break;
3837 spin_unlock_irqrestore(&ha->vport_slock, flags);
3839 if (!found)
3840 return;
3842 qlt_update_host_map(vp, id);
3845 * Cannot configure here as we are still sitting on the
3846 * response queue. Handle it in dpc context.
3848 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
3849 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
3850 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
3852 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
3853 qla2xxx_wake_dpc(vha);
3854 } else if (rptid_entry->format == 2) {
3855 ql_dbg(ql_dbg_async, vha, 0x505f,
3856 "RIDA: format 2/N2N Primary port id %02x%02x%02x.\n",
3857 rptid_entry->port_id[2], rptid_entry->port_id[1],
3858 rptid_entry->port_id[0]);
3860 ql_dbg(ql_dbg_async, vha, 0x5075,
3861 "N2N: Remote WWPN %8phC.\n",
3862 rptid_entry->u.f2.port_name);
3864 /* N2N. direct connect */
3865 vha->d_id.b.domain = rptid_entry->port_id[2];
3866 vha->d_id.b.area = rptid_entry->port_id[1];
3867 vha->d_id.b.al_pa = rptid_entry->port_id[0];
3869 spin_lock_irqsave(&ha->vport_slock, flags);
3870 qlt_update_vp_map(vha, SET_AL_PA);
3871 spin_unlock_irqrestore(&ha->vport_slock, flags);
3876 * qla24xx_modify_vp_config
3877 * Change VP configuration for vha
3879 * Input:
3880 * vha = adapter block pointer.
3882 * Returns:
3883 * qla2xxx local function return status code.
3885 * Context:
3886 * Kernel context.
3889 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3891 int rval;
3892 struct vp_config_entry_24xx *vpmod;
3893 dma_addr_t vpmod_dma;
3894 struct qla_hw_data *ha = vha->hw;
3895 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3897 /* This can be called by the parent */
3899 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3900 "Entered %s.\n", __func__);
3902 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3903 if (!vpmod) {
3904 ql_log(ql_log_warn, vha, 0x10bc,
3905 "Failed to allocate modify VP IOCB.\n");
3906 return QLA_MEMORY_ALLOC_FAILED;
3909 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
3910 vpmod->entry_count = 1;
3911 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
3912 vpmod->vp_count = 1;
3913 vpmod->vp_index1 = vha->vp_idx;
3914 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3916 qlt_modify_vp_config(vha, vpmod);
3918 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3919 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3920 vpmod->entry_count = 1;
3922 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
3923 if (rval != QLA_SUCCESS) {
3924 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
3925 "Failed to issue VP config IOCB (%x).\n", rval);
3926 } else if (vpmod->comp_status != 0) {
3927 ql_dbg(ql_dbg_mbx, vha, 0x10be,
3928 "Failed to complete IOCB -- error status (%x).\n",
3929 vpmod->comp_status);
3930 rval = QLA_FUNCTION_FAILED;
3931 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3932 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3933 "Failed to complete IOCB -- completion status (%x).\n",
3934 le16_to_cpu(vpmod->comp_status));
3935 rval = QLA_FUNCTION_FAILED;
3936 } else {
3937 /* EMPTY */
3938 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3939 "Done %s.\n", __func__);
3940 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3942 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
3944 return rval;
3948 * qla24xx_control_vp
3949 * Enable a virtual port for given host
3951 * Input:
3952 * ha = adapter block pointer.
3953 * vhba = virtual adapter (unused)
3954 * index = index number for enabled VP
3956 * Returns:
3957 * qla2xxx local function return status code.
3959 * Context:
3960 * Kernel context.
3963 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3965 int rval;
3966 int map, pos;
3967 struct vp_ctrl_entry_24xx *vce;
3968 dma_addr_t vce_dma;
3969 struct qla_hw_data *ha = vha->hw;
3970 int vp_index = vha->vp_idx;
3971 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3973 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3974 "Entered %s enabling index %d.\n", __func__, vp_index);
3976 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3977 return QLA_PARAMETER_ERROR;
3979 vce = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3980 if (!vce) {
3981 ql_log(ql_log_warn, vha, 0x10c2,
3982 "Failed to allocate VP control IOCB.\n");
3983 return QLA_MEMORY_ALLOC_FAILED;
3986 vce->entry_type = VP_CTRL_IOCB_TYPE;
3987 vce->entry_count = 1;
3988 vce->command = cpu_to_le16(cmd);
3989 vce->vp_count = cpu_to_le16(1);
3991 /* index map in firmware starts with 1; decrement index
3992 * this is ok as we never use index 0
3994 map = (vp_index - 1) / 8;
3995 pos = (vp_index - 1) & 7;
3996 mutex_lock(&ha->vport_lock);
3997 vce->vp_idx_map[map] |= 1 << pos;
3998 mutex_unlock(&ha->vport_lock);
4000 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
4001 if (rval != QLA_SUCCESS) {
4002 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
4003 "Failed to issue VP control IOCB (%x).\n", rval);
4004 } else if (vce->entry_status != 0) {
4005 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
4006 "Failed to complete IOCB -- error status (%x).\n",
4007 vce->entry_status);
4008 rval = QLA_FUNCTION_FAILED;
4009 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
4010 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
4011 "Failed to complete IOCB -- completion status (%x).\n",
4012 le16_to_cpu(vce->comp_status));
4013 rval = QLA_FUNCTION_FAILED;
4014 } else {
4015 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
4016 "Done %s.\n", __func__);
4019 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
4021 return rval;
4025 * qla2x00_send_change_request
4026 * Receive or disable RSCN request from fabric controller
4028 * Input:
4029 * ha = adapter block pointer
4030 * format = registration format:
4031 * 0 - Reserved
4032 * 1 - Fabric detected registration
4033 * 2 - N_port detected registration
4034 * 3 - Full registration
4035 * FF - clear registration
4036 * vp_idx = Virtual port index
4038 * Returns:
4039 * qla2x00 local function return status code.
4041 * Context:
4042 * Kernel Context
4046 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
4047 uint16_t vp_idx)
4049 int rval;
4050 mbx_cmd_t mc;
4051 mbx_cmd_t *mcp = &mc;
4053 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
4054 "Entered %s.\n", __func__);
4056 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
4057 mcp->mb[1] = format;
4058 mcp->mb[9] = vp_idx;
4059 mcp->out_mb = MBX_9|MBX_1|MBX_0;
4060 mcp->in_mb = MBX_0|MBX_1;
4061 mcp->tov = MBX_TOV_SECONDS;
4062 mcp->flags = 0;
4063 rval = qla2x00_mailbox_command(vha, mcp);
4065 if (rval == QLA_SUCCESS) {
4066 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
4067 rval = BIT_1;
4069 } else
4070 rval = BIT_1;
4072 return rval;
4076 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
4077 uint32_t size)
4079 int rval;
4080 mbx_cmd_t mc;
4081 mbx_cmd_t *mcp = &mc;
4083 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
4084 "Entered %s.\n", __func__);
4086 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
4087 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
4088 mcp->mb[8] = MSW(addr);
4089 mcp->out_mb = MBX_8|MBX_0;
4090 } else {
4091 mcp->mb[0] = MBC_DUMP_RISC_RAM;
4092 mcp->out_mb = MBX_0;
4094 mcp->mb[1] = LSW(addr);
4095 mcp->mb[2] = MSW(req_dma);
4096 mcp->mb[3] = LSW(req_dma);
4097 mcp->mb[6] = MSW(MSD(req_dma));
4098 mcp->mb[7] = LSW(MSD(req_dma));
4099 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
4100 if (IS_FWI2_CAPABLE(vha->hw)) {
4101 mcp->mb[4] = MSW(size);
4102 mcp->mb[5] = LSW(size);
4103 mcp->out_mb |= MBX_5|MBX_4;
4104 } else {
4105 mcp->mb[4] = LSW(size);
4106 mcp->out_mb |= MBX_4;
4109 mcp->in_mb = MBX_0;
4110 mcp->tov = MBX_TOV_SECONDS;
4111 mcp->flags = 0;
4112 rval = qla2x00_mailbox_command(vha, mcp);
4114 if (rval != QLA_SUCCESS) {
4115 ql_dbg(ql_dbg_mbx, vha, 0x1008,
4116 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4117 } else {
4118 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
4119 "Done %s.\n", __func__);
4122 return rval;
4124 /* 84XX Support **************************************************************/
4126 struct cs84xx_mgmt_cmd {
4127 union {
4128 struct verify_chip_entry_84xx req;
4129 struct verify_chip_rsp_84xx rsp;
4130 } p;
4134 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
4136 int rval, retry;
4137 struct cs84xx_mgmt_cmd *mn;
4138 dma_addr_t mn_dma;
4139 uint16_t options;
4140 unsigned long flags;
4141 struct qla_hw_data *ha = vha->hw;
4143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
4144 "Entered %s.\n", __func__);
4146 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4147 if (mn == NULL) {
4148 return QLA_MEMORY_ALLOC_FAILED;
4151 /* Force Update? */
4152 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4153 /* Diagnostic firmware? */
4154 /* options |= MENLO_DIAG_FW; */
4155 /* We update the firmware with only one data sequence. */
4156 options |= VCO_END_OF_DATA;
4158 do {
4159 retry = 0;
4160 memset(mn, 0, sizeof(*mn));
4161 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
4162 mn->p.req.entry_count = 1;
4163 mn->p.req.options = cpu_to_le16(options);
4165 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
4166 "Dump of Verify Request.\n");
4167 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
4168 (uint8_t *)mn, sizeof(*mn));
4170 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
4171 if (rval != QLA_SUCCESS) {
4172 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
4173 "Failed to issue verify IOCB (%x).\n", rval);
4174 goto verify_done;
4177 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
4178 "Dump of Verify Response.\n");
4179 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
4180 (uint8_t *)mn, sizeof(*mn));
4182 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
4183 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
4184 le16_to_cpu(mn->p.rsp.failure_code) : 0;
4185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
4186 "cs=%x fc=%x.\n", status[0], status[1]);
4188 if (status[0] != CS_COMPLETE) {
4189 rval = QLA_FUNCTION_FAILED;
4190 if (!(options & VCO_DONT_UPDATE_FW)) {
4191 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
4192 "Firmware update failed. Retrying "
4193 "without update firmware.\n");
4194 options |= VCO_DONT_UPDATE_FW;
4195 options &= ~VCO_FORCE_UPDATE;
4196 retry = 1;
4198 } else {
4199 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
4200 "Firmware updated to %x.\n",
4201 le32_to_cpu(mn->p.rsp.fw_ver));
4203 /* NOTE: we only update OP firmware. */
4204 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4205 ha->cs84xx->op_fw_version =
4206 le32_to_cpu(mn->p.rsp.fw_ver);
4207 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4208 flags);
4210 } while (retry);
4212 verify_done:
4213 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4215 if (rval != QLA_SUCCESS) {
4216 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
4217 "Failed=%x.\n", rval);
4218 } else {
4219 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
4220 "Done %s.\n", __func__);
4223 return rval;
4227 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
4229 int rval;
4230 unsigned long flags;
4231 mbx_cmd_t mc;
4232 mbx_cmd_t *mcp = &mc;
4233 struct qla_hw_data *ha = vha->hw;
4235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
4236 "Entered %s.\n", __func__);
4238 if (IS_SHADOW_REG_CAPABLE(ha))
4239 req->options |= BIT_13;
4241 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4242 mcp->mb[1] = req->options;
4243 mcp->mb[2] = MSW(LSD(req->dma));
4244 mcp->mb[3] = LSW(LSD(req->dma));
4245 mcp->mb[6] = MSW(MSD(req->dma));
4246 mcp->mb[7] = LSW(MSD(req->dma));
4247 mcp->mb[5] = req->length;
4248 if (req->rsp)
4249 mcp->mb[10] = req->rsp->id;
4250 mcp->mb[12] = req->qos;
4251 mcp->mb[11] = req->vp_idx;
4252 mcp->mb[13] = req->rid;
4253 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4254 mcp->mb[15] = 0;
4256 mcp->mb[4] = req->id;
4257 /* que in ptr index */
4258 mcp->mb[8] = 0;
4259 /* que out ptr index */
4260 mcp->mb[9] = *req->out_ptr = 0;
4261 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
4262 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4263 mcp->in_mb = MBX_0;
4264 mcp->flags = MBX_DMA_OUT;
4265 mcp->tov = MBX_TOV_SECONDS * 2;
4267 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
4268 mcp->in_mb |= MBX_1;
4269 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4270 mcp->out_mb |= MBX_15;
4271 /* debug q create issue in SR-IOV */
4272 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4275 spin_lock_irqsave(&ha->hardware_lock, flags);
4276 if (!(req->options & BIT_0)) {
4277 WRT_REG_DWORD(req->req_q_in, 0);
4278 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4279 WRT_REG_DWORD(req->req_q_out, 0);
4281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4283 rval = qla2x00_mailbox_command(vha, mcp);
4284 if (rval != QLA_SUCCESS) {
4285 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
4286 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4287 } else {
4288 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
4289 "Done %s.\n", __func__);
4292 return rval;
4296 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
4298 int rval;
4299 unsigned long flags;
4300 mbx_cmd_t mc;
4301 mbx_cmd_t *mcp = &mc;
4302 struct qla_hw_data *ha = vha->hw;
4304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
4305 "Entered %s.\n", __func__);
4307 if (IS_SHADOW_REG_CAPABLE(ha))
4308 rsp->options |= BIT_13;
4310 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
4311 mcp->mb[1] = rsp->options;
4312 mcp->mb[2] = MSW(LSD(rsp->dma));
4313 mcp->mb[3] = LSW(LSD(rsp->dma));
4314 mcp->mb[6] = MSW(MSD(rsp->dma));
4315 mcp->mb[7] = LSW(MSD(rsp->dma));
4316 mcp->mb[5] = rsp->length;
4317 mcp->mb[14] = rsp->msix->entry;
4318 mcp->mb[13] = rsp->rid;
4319 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
4320 mcp->mb[15] = 0;
4322 mcp->mb[4] = rsp->id;
4323 /* que in ptr index */
4324 mcp->mb[8] = *rsp->in_ptr = 0;
4325 /* que out ptr index */
4326 mcp->mb[9] = 0;
4327 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
4328 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4329 mcp->in_mb = MBX_0;
4330 mcp->flags = MBX_DMA_OUT;
4331 mcp->tov = MBX_TOV_SECONDS * 2;
4333 if (IS_QLA81XX(ha)) {
4334 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
4335 mcp->in_mb |= MBX_1;
4336 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
4337 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
4338 mcp->in_mb |= MBX_1;
4339 /* debug q create issue in SR-IOV */
4340 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
4343 spin_lock_irqsave(&ha->hardware_lock, flags);
4344 if (!(rsp->options & BIT_0)) {
4345 WRT_REG_DWORD(rsp->rsp_q_out, 0);
4346 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
4347 WRT_REG_DWORD(rsp->rsp_q_in, 0);
4350 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4352 rval = qla2x00_mailbox_command(vha, mcp);
4353 if (rval != QLA_SUCCESS) {
4354 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
4355 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4356 } else {
4357 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
4358 "Done %s.\n", __func__);
4361 return rval;
4365 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
4367 int rval;
4368 mbx_cmd_t mc;
4369 mbx_cmd_t *mcp = &mc;
4371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
4372 "Entered %s.\n", __func__);
4374 mcp->mb[0] = MBC_IDC_ACK;
4375 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
4376 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4377 mcp->in_mb = MBX_0;
4378 mcp->tov = MBX_TOV_SECONDS;
4379 mcp->flags = 0;
4380 rval = qla2x00_mailbox_command(vha, mcp);
4382 if (rval != QLA_SUCCESS) {
4383 ql_dbg(ql_dbg_mbx, vha, 0x10da,
4384 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4385 } else {
4386 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
4387 "Done %s.\n", __func__);
4390 return rval;
4394 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
4396 int rval;
4397 mbx_cmd_t mc;
4398 mbx_cmd_t *mcp = &mc;
4400 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
4401 "Entered %s.\n", __func__);
4403 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4404 !IS_QLA27XX(vha->hw))
4405 return QLA_FUNCTION_FAILED;
4407 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4408 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
4409 mcp->out_mb = MBX_1|MBX_0;
4410 mcp->in_mb = MBX_1|MBX_0;
4411 mcp->tov = MBX_TOV_SECONDS;
4412 mcp->flags = 0;
4413 rval = qla2x00_mailbox_command(vha, mcp);
4415 if (rval != QLA_SUCCESS) {
4416 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
4417 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4418 rval, mcp->mb[0], mcp->mb[1]);
4419 } else {
4420 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
4421 "Done %s.\n", __func__);
4422 *sector_size = mcp->mb[1];
4425 return rval;
4429 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
4431 int rval;
4432 mbx_cmd_t mc;
4433 mbx_cmd_t *mcp = &mc;
4435 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4436 !IS_QLA27XX(vha->hw))
4437 return QLA_FUNCTION_FAILED;
4439 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
4440 "Entered %s.\n", __func__);
4442 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4443 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
4444 FAC_OPT_CMD_WRITE_PROTECT;
4445 mcp->out_mb = MBX_1|MBX_0;
4446 mcp->in_mb = MBX_1|MBX_0;
4447 mcp->tov = MBX_TOV_SECONDS;
4448 mcp->flags = 0;
4449 rval = qla2x00_mailbox_command(vha, mcp);
4451 if (rval != QLA_SUCCESS) {
4452 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
4453 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4454 rval, mcp->mb[0], mcp->mb[1]);
4455 } else {
4456 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
4457 "Done %s.\n", __func__);
4460 return rval;
4464 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
4466 int rval;
4467 mbx_cmd_t mc;
4468 mbx_cmd_t *mcp = &mc;
4470 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
4471 !IS_QLA27XX(vha->hw))
4472 return QLA_FUNCTION_FAILED;
4474 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
4475 "Entered %s.\n", __func__);
4477 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
4478 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
4479 mcp->mb[2] = LSW(start);
4480 mcp->mb[3] = MSW(start);
4481 mcp->mb[4] = LSW(finish);
4482 mcp->mb[5] = MSW(finish);
4483 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4484 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4485 mcp->tov = MBX_TOV_SECONDS;
4486 mcp->flags = 0;
4487 rval = qla2x00_mailbox_command(vha, mcp);
4489 if (rval != QLA_SUCCESS) {
4490 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
4491 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4492 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4493 } else {
4494 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
4495 "Done %s.\n", __func__);
4498 return rval;
4502 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
4504 int rval = 0;
4505 mbx_cmd_t mc;
4506 mbx_cmd_t *mcp = &mc;
4508 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
4509 "Entered %s.\n", __func__);
4511 mcp->mb[0] = MBC_RESTART_MPI_FW;
4512 mcp->out_mb = MBX_0;
4513 mcp->in_mb = MBX_0|MBX_1;
4514 mcp->tov = MBX_TOV_SECONDS;
4515 mcp->flags = 0;
4516 rval = qla2x00_mailbox_command(vha, mcp);
4518 if (rval != QLA_SUCCESS) {
4519 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
4520 "Failed=%x mb[0]=%x mb[1]=%x.\n",
4521 rval, mcp->mb[0], mcp->mb[1]);
4522 } else {
4523 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
4524 "Done %s.\n", __func__);
4527 return rval;
4531 qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4533 int rval;
4534 mbx_cmd_t mc;
4535 mbx_cmd_t *mcp = &mc;
4536 int i;
4537 int len;
4538 uint16_t *str;
4539 struct qla_hw_data *ha = vha->hw;
4541 if (!IS_P3P_TYPE(ha))
4542 return QLA_FUNCTION_FAILED;
4544 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
4545 "Entered %s.\n", __func__);
4547 str = (void *)version;
4548 len = strlen(version);
4550 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4551 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
4552 mcp->out_mb = MBX_1|MBX_0;
4553 for (i = 4; i < 16 && len; i++, str++, len -= 2) {
4554 mcp->mb[i] = cpu_to_le16p(str);
4555 mcp->out_mb |= 1<<i;
4557 for (; i < 16; i++) {
4558 mcp->mb[i] = 0;
4559 mcp->out_mb |= 1<<i;
4561 mcp->in_mb = MBX_1|MBX_0;
4562 mcp->tov = MBX_TOV_SECONDS;
4563 mcp->flags = 0;
4564 rval = qla2x00_mailbox_command(vha, mcp);
4566 if (rval != QLA_SUCCESS) {
4567 ql_dbg(ql_dbg_mbx, vha, 0x117c,
4568 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4569 } else {
4570 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
4571 "Done %s.\n", __func__);
4574 return rval;
4578 qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
4580 int rval;
4581 mbx_cmd_t mc;
4582 mbx_cmd_t *mcp = &mc;
4583 int len;
4584 uint16_t dwlen;
4585 uint8_t *str;
4586 dma_addr_t str_dma;
4587 struct qla_hw_data *ha = vha->hw;
4589 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4590 IS_P3P_TYPE(ha))
4591 return QLA_FUNCTION_FAILED;
4593 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
4594 "Entered %s.\n", __func__);
4596 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4597 if (!str) {
4598 ql_log(ql_log_warn, vha, 0x117f,
4599 "Failed to allocate driver version param.\n");
4600 return QLA_MEMORY_ALLOC_FAILED;
4603 memcpy(str, "\x7\x3\x11\x0", 4);
4604 dwlen = str[0];
4605 len = dwlen * 4 - 4;
4606 memset(str + 4, 0, len);
4607 if (len > strlen(version))
4608 len = strlen(version);
4609 memcpy(str + 4, version, len);
4611 mcp->mb[0] = MBC_SET_RNID_PARAMS;
4612 mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
4613 mcp->mb[2] = MSW(LSD(str_dma));
4614 mcp->mb[3] = LSW(LSD(str_dma));
4615 mcp->mb[6] = MSW(MSD(str_dma));
4616 mcp->mb[7] = LSW(MSD(str_dma));
4617 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4618 mcp->in_mb = MBX_1|MBX_0;
4619 mcp->tov = MBX_TOV_SECONDS;
4620 mcp->flags = 0;
4621 rval = qla2x00_mailbox_command(vha, mcp);
4623 if (rval != QLA_SUCCESS) {
4624 ql_dbg(ql_dbg_mbx, vha, 0x1180,
4625 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4626 } else {
4627 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
4628 "Done %s.\n", __func__);
4631 dma_pool_free(ha->s_dma_pool, str, str_dma);
4633 return rval;
4637 qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma,
4638 void *buf, uint16_t bufsiz)
4640 int rval, i;
4641 mbx_cmd_t mc;
4642 mbx_cmd_t *mcp = &mc;
4643 uint32_t *bp;
4645 if (!IS_FWI2_CAPABLE(vha->hw))
4646 return QLA_FUNCTION_FAILED;
4648 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4649 "Entered %s.\n", __func__);
4651 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4652 mcp->mb[1] = RNID_TYPE_PORT_LOGIN << 8;
4653 mcp->mb[2] = MSW(buf_dma);
4654 mcp->mb[3] = LSW(buf_dma);
4655 mcp->mb[6] = MSW(MSD(buf_dma));
4656 mcp->mb[7] = LSW(MSD(buf_dma));
4657 mcp->mb[8] = bufsiz/4;
4658 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4659 mcp->in_mb = MBX_1|MBX_0;
4660 mcp->tov = MBX_TOV_SECONDS;
4661 mcp->flags = 0;
4662 rval = qla2x00_mailbox_command(vha, mcp);
4664 if (rval != QLA_SUCCESS) {
4665 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4666 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4667 } else {
4668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4669 "Done %s.\n", __func__);
4670 bp = (uint32_t *) buf;
4671 for (i = 0; i < (bufsiz-4)/4; i++, bp++)
4672 *bp = cpu_to_be32(*bp);
4675 return rval;
4678 static int
4679 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
4681 int rval;
4682 mbx_cmd_t mc;
4683 mbx_cmd_t *mcp = &mc;
4685 if (!IS_FWI2_CAPABLE(vha->hw))
4686 return QLA_FUNCTION_FAILED;
4688 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
4689 "Entered %s.\n", __func__);
4691 mcp->mb[0] = MBC_GET_RNID_PARAMS;
4692 mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
4693 mcp->out_mb = MBX_1|MBX_0;
4694 mcp->in_mb = MBX_1|MBX_0;
4695 mcp->tov = MBX_TOV_SECONDS;
4696 mcp->flags = 0;
4697 rval = qla2x00_mailbox_command(vha, mcp);
4698 *temp = mcp->mb[1];
4700 if (rval != QLA_SUCCESS) {
4701 ql_dbg(ql_dbg_mbx, vha, 0x115a,
4702 "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
4703 } else {
4704 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
4705 "Done %s.\n", __func__);
4708 return rval;
4712 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4713 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4715 int rval;
4716 mbx_cmd_t mc;
4717 mbx_cmd_t *mcp = &mc;
4718 struct qla_hw_data *ha = vha->hw;
4720 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
4721 "Entered %s.\n", __func__);
4723 if (!IS_FWI2_CAPABLE(ha))
4724 return QLA_FUNCTION_FAILED;
4726 if (len == 1)
4727 opt |= BIT_0;
4729 mcp->mb[0] = MBC_READ_SFP;
4730 mcp->mb[1] = dev;
4731 mcp->mb[2] = MSW(sfp_dma);
4732 mcp->mb[3] = LSW(sfp_dma);
4733 mcp->mb[6] = MSW(MSD(sfp_dma));
4734 mcp->mb[7] = LSW(MSD(sfp_dma));
4735 mcp->mb[8] = len;
4736 mcp->mb[9] = off;
4737 mcp->mb[10] = opt;
4738 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4739 mcp->in_mb = MBX_1|MBX_0;
4740 mcp->tov = MBX_TOV_SECONDS;
4741 mcp->flags = 0;
4742 rval = qla2x00_mailbox_command(vha, mcp);
4744 if (opt & BIT_0)
4745 *sfp = mcp->mb[1];
4747 if (rval != QLA_SUCCESS) {
4748 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
4749 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4750 if (mcp->mb[0] == MBS_COMMAND_ERROR &&
4751 mcp->mb[1] == 0x22)
4752 /* sfp is not there */
4753 rval = QLA_INTERFACE_ERROR;
4754 } else {
4755 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
4756 "Done %s.\n", __func__);
4759 return rval;
4763 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
4764 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
4766 int rval;
4767 mbx_cmd_t mc;
4768 mbx_cmd_t *mcp = &mc;
4769 struct qla_hw_data *ha = vha->hw;
4771 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
4772 "Entered %s.\n", __func__);
4774 if (!IS_FWI2_CAPABLE(ha))
4775 return QLA_FUNCTION_FAILED;
4777 if (len == 1)
4778 opt |= BIT_0;
4780 if (opt & BIT_0)
4781 len = *sfp;
4783 mcp->mb[0] = MBC_WRITE_SFP;
4784 mcp->mb[1] = dev;
4785 mcp->mb[2] = MSW(sfp_dma);
4786 mcp->mb[3] = LSW(sfp_dma);
4787 mcp->mb[6] = MSW(MSD(sfp_dma));
4788 mcp->mb[7] = LSW(MSD(sfp_dma));
4789 mcp->mb[8] = len;
4790 mcp->mb[9] = off;
4791 mcp->mb[10] = opt;
4792 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4793 mcp->in_mb = MBX_1|MBX_0;
4794 mcp->tov = MBX_TOV_SECONDS;
4795 mcp->flags = 0;
4796 rval = qla2x00_mailbox_command(vha, mcp);
4798 if (rval != QLA_SUCCESS) {
4799 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
4800 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4801 } else {
4802 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
4803 "Done %s.\n", __func__);
4806 return rval;
4810 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
4811 uint16_t size_in_bytes, uint16_t *actual_size)
4813 int rval;
4814 mbx_cmd_t mc;
4815 mbx_cmd_t *mcp = &mc;
4817 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
4818 "Entered %s.\n", __func__);
4820 if (!IS_CNA_CAPABLE(vha->hw))
4821 return QLA_FUNCTION_FAILED;
4823 mcp->mb[0] = MBC_GET_XGMAC_STATS;
4824 mcp->mb[2] = MSW(stats_dma);
4825 mcp->mb[3] = LSW(stats_dma);
4826 mcp->mb[6] = MSW(MSD(stats_dma));
4827 mcp->mb[7] = LSW(MSD(stats_dma));
4828 mcp->mb[8] = size_in_bytes >> 2;
4829 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
4830 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4831 mcp->tov = MBX_TOV_SECONDS;
4832 mcp->flags = 0;
4833 rval = qla2x00_mailbox_command(vha, mcp);
4835 if (rval != QLA_SUCCESS) {
4836 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
4837 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4838 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4839 } else {
4840 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
4841 "Done %s.\n", __func__);
4844 *actual_size = mcp->mb[2] << 2;
4847 return rval;
4851 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
4852 uint16_t size)
4854 int rval;
4855 mbx_cmd_t mc;
4856 mbx_cmd_t *mcp = &mc;
4858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
4859 "Entered %s.\n", __func__);
4861 if (!IS_CNA_CAPABLE(vha->hw))
4862 return QLA_FUNCTION_FAILED;
4864 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
4865 mcp->mb[1] = 0;
4866 mcp->mb[2] = MSW(tlv_dma);
4867 mcp->mb[3] = LSW(tlv_dma);
4868 mcp->mb[6] = MSW(MSD(tlv_dma));
4869 mcp->mb[7] = LSW(MSD(tlv_dma));
4870 mcp->mb[8] = size;
4871 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
4872 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4873 mcp->tov = MBX_TOV_SECONDS;
4874 mcp->flags = 0;
4875 rval = qla2x00_mailbox_command(vha, mcp);
4877 if (rval != QLA_SUCCESS) {
4878 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
4879 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
4880 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
4881 } else {
4882 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
4883 "Done %s.\n", __func__);
4886 return rval;
4890 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
4892 int rval;
4893 mbx_cmd_t mc;
4894 mbx_cmd_t *mcp = &mc;
4896 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4897 "Entered %s.\n", __func__);
4899 if (!IS_FWI2_CAPABLE(vha->hw))
4900 return QLA_FUNCTION_FAILED;
4902 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
4903 mcp->mb[1] = LSW(risc_addr);
4904 mcp->mb[8] = MSW(risc_addr);
4905 mcp->out_mb = MBX_8|MBX_1|MBX_0;
4906 mcp->in_mb = MBX_3|MBX_2|MBX_0;
4907 mcp->tov = 30;
4908 mcp->flags = 0;
4909 rval = qla2x00_mailbox_command(vha, mcp);
4910 if (rval != QLA_SUCCESS) {
4911 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
4912 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4913 } else {
4914 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4915 "Done %s.\n", __func__);
4916 *data = mcp->mb[3] << 16 | mcp->mb[2];
4919 return rval;
4923 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4924 uint16_t *mresp)
4926 int rval;
4927 mbx_cmd_t mc;
4928 mbx_cmd_t *mcp = &mc;
4930 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4931 "Entered %s.\n", __func__);
4933 memset(mcp->mb, 0 , sizeof(mcp->mb));
4934 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
4935 mcp->mb[1] = mreq->options | BIT_6; // BIT_6 specifies 64 bit addressing
4937 /* transfer count */
4938 mcp->mb[10] = LSW(mreq->transfer_size);
4939 mcp->mb[11] = MSW(mreq->transfer_size);
4941 /* send data address */
4942 mcp->mb[14] = LSW(mreq->send_dma);
4943 mcp->mb[15] = MSW(mreq->send_dma);
4944 mcp->mb[20] = LSW(MSD(mreq->send_dma));
4945 mcp->mb[21] = MSW(MSD(mreq->send_dma));
4947 /* receive data address */
4948 mcp->mb[16] = LSW(mreq->rcv_dma);
4949 mcp->mb[17] = MSW(mreq->rcv_dma);
4950 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
4951 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
4953 /* Iteration count */
4954 mcp->mb[18] = LSW(mreq->iteration_count);
4955 mcp->mb[19] = MSW(mreq->iteration_count);
4957 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
4958 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
4959 if (IS_CNA_CAPABLE(vha->hw))
4960 mcp->out_mb |= MBX_2;
4961 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
4963 mcp->buf_size = mreq->transfer_size;
4964 mcp->tov = MBX_TOV_SECONDS;
4965 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4967 rval = qla2x00_mailbox_command(vha, mcp);
4969 if (rval != QLA_SUCCESS) {
4970 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
4971 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
4972 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
4973 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
4974 } else {
4975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4976 "Done %s.\n", __func__);
4979 /* Copy mailbox information */
4980 memcpy( mresp, mcp->mb, 64);
4981 return rval;
4985 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
4986 uint16_t *mresp)
4988 int rval;
4989 mbx_cmd_t mc;
4990 mbx_cmd_t *mcp = &mc;
4991 struct qla_hw_data *ha = vha->hw;
4993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4994 "Entered %s.\n", __func__);
4996 memset(mcp->mb, 0 , sizeof(mcp->mb));
4997 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
4998 /* BIT_6 specifies 64bit address */
4999 mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
5000 if (IS_CNA_CAPABLE(ha)) {
5001 mcp->mb[2] = vha->fcoe_fcf_idx;
5003 mcp->mb[16] = LSW(mreq->rcv_dma);
5004 mcp->mb[17] = MSW(mreq->rcv_dma);
5005 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
5006 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
5008 mcp->mb[10] = LSW(mreq->transfer_size);
5010 mcp->mb[14] = LSW(mreq->send_dma);
5011 mcp->mb[15] = MSW(mreq->send_dma);
5012 mcp->mb[20] = LSW(MSD(mreq->send_dma));
5013 mcp->mb[21] = MSW(MSD(mreq->send_dma));
5015 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
5016 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
5017 if (IS_CNA_CAPABLE(ha))
5018 mcp->out_mb |= MBX_2;
5020 mcp->in_mb = MBX_0;
5021 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5022 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5023 mcp->in_mb |= MBX_1;
5024 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
5025 mcp->in_mb |= MBX_3;
5027 mcp->tov = MBX_TOV_SECONDS;
5028 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5029 mcp->buf_size = mreq->transfer_size;
5031 rval = qla2x00_mailbox_command(vha, mcp);
5033 if (rval != QLA_SUCCESS) {
5034 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
5035 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5036 rval, mcp->mb[0], mcp->mb[1]);
5037 } else {
5038 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
5039 "Done %s.\n", __func__);
5042 /* Copy mailbox information */
5043 memcpy(mresp, mcp->mb, 64);
5044 return rval;
5048 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
5050 int rval;
5051 mbx_cmd_t mc;
5052 mbx_cmd_t *mcp = &mc;
5054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
5055 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
5057 mcp->mb[0] = MBC_ISP84XX_RESET;
5058 mcp->mb[1] = enable_diagnostic;
5059 mcp->out_mb = MBX_1|MBX_0;
5060 mcp->in_mb = MBX_1|MBX_0;
5061 mcp->tov = MBX_TOV_SECONDS;
5062 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5063 rval = qla2x00_mailbox_command(vha, mcp);
5065 if (rval != QLA_SUCCESS)
5066 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
5067 else
5068 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
5069 "Done %s.\n", __func__);
5071 return rval;
5075 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
5077 int rval;
5078 mbx_cmd_t mc;
5079 mbx_cmd_t *mcp = &mc;
5081 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
5082 "Entered %s.\n", __func__);
5084 if (!IS_FWI2_CAPABLE(vha->hw))
5085 return QLA_FUNCTION_FAILED;
5087 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
5088 mcp->mb[1] = LSW(risc_addr);
5089 mcp->mb[2] = LSW(data);
5090 mcp->mb[3] = MSW(data);
5091 mcp->mb[8] = MSW(risc_addr);
5092 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
5093 mcp->in_mb = MBX_0;
5094 mcp->tov = 30;
5095 mcp->flags = 0;
5096 rval = qla2x00_mailbox_command(vha, mcp);
5097 if (rval != QLA_SUCCESS) {
5098 ql_dbg(ql_dbg_mbx, vha, 0x1101,
5099 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5100 } else {
5101 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
5102 "Done %s.\n", __func__);
5105 return rval;
5109 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
5111 int rval;
5112 uint32_t stat, timer;
5113 uint16_t mb0 = 0;
5114 struct qla_hw_data *ha = vha->hw;
5115 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5117 rval = QLA_SUCCESS;
5119 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
5120 "Entered %s.\n", __func__);
5122 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5124 /* Write the MBC data to the registers */
5125 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
5126 WRT_REG_WORD(&reg->mailbox1, mb[0]);
5127 WRT_REG_WORD(&reg->mailbox2, mb[1]);
5128 WRT_REG_WORD(&reg->mailbox3, mb[2]);
5129 WRT_REG_WORD(&reg->mailbox4, mb[3]);
5131 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
5133 /* Poll for MBC interrupt */
5134 for (timer = 6000000; timer; timer--) {
5135 /* Check for pending interrupts. */
5136 stat = RD_REG_DWORD(&reg->host_status);
5137 if (stat & HSRX_RISC_INT) {
5138 stat &= 0xff;
5140 if (stat == 0x1 || stat == 0x2 ||
5141 stat == 0x10 || stat == 0x11) {
5142 set_bit(MBX_INTERRUPT,
5143 &ha->mbx_cmd_flags);
5144 mb0 = RD_REG_WORD(&reg->mailbox0);
5145 WRT_REG_DWORD(&reg->hccr,
5146 HCCRX_CLR_RISC_INT);
5147 RD_REG_DWORD(&reg->hccr);
5148 break;
5151 udelay(5);
5154 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5155 rval = mb0 & MBS_MASK;
5156 else
5157 rval = QLA_FUNCTION_FAILED;
5159 if (rval != QLA_SUCCESS) {
5160 ql_dbg(ql_dbg_mbx, vha, 0x1104,
5161 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
5162 } else {
5163 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
5164 "Done %s.\n", __func__);
5167 return rval;
5171 qla2x00_get_data_rate(scsi_qla_host_t *vha)
5173 int rval;
5174 mbx_cmd_t mc;
5175 mbx_cmd_t *mcp = &mc;
5176 struct qla_hw_data *ha = vha->hw;
5178 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
5179 "Entered %s.\n", __func__);
5181 if (!IS_FWI2_CAPABLE(ha))
5182 return QLA_FUNCTION_FAILED;
5184 mcp->mb[0] = MBC_DATA_RATE;
5185 mcp->mb[1] = 0;
5186 mcp->out_mb = MBX_1|MBX_0;
5187 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5188 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
5189 mcp->in_mb |= MBX_3;
5190 mcp->tov = MBX_TOV_SECONDS;
5191 mcp->flags = 0;
5192 rval = qla2x00_mailbox_command(vha, mcp);
5193 if (rval != QLA_SUCCESS) {
5194 ql_dbg(ql_dbg_mbx, vha, 0x1107,
5195 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5196 } else {
5197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
5198 "Done %s.\n", __func__);
5199 if (mcp->mb[1] != 0x7)
5200 ha->link_data_rate = mcp->mb[1];
5203 return rval;
5207 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5209 int rval;
5210 mbx_cmd_t mc;
5211 mbx_cmd_t *mcp = &mc;
5212 struct qla_hw_data *ha = vha->hw;
5214 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
5215 "Entered %s.\n", __func__);
5217 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5218 !IS_QLA27XX(ha))
5219 return QLA_FUNCTION_FAILED;
5220 mcp->mb[0] = MBC_GET_PORT_CONFIG;
5221 mcp->out_mb = MBX_0;
5222 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5223 mcp->tov = MBX_TOV_SECONDS;
5224 mcp->flags = 0;
5226 rval = qla2x00_mailbox_command(vha, mcp);
5228 if (rval != QLA_SUCCESS) {
5229 ql_dbg(ql_dbg_mbx, vha, 0x110a,
5230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5231 } else {
5232 /* Copy all bits to preserve original value */
5233 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
5235 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
5236 "Done %s.\n", __func__);
5238 return rval;
5242 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
5244 int rval;
5245 mbx_cmd_t mc;
5246 mbx_cmd_t *mcp = &mc;
5248 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
5249 "Entered %s.\n", __func__);
5251 mcp->mb[0] = MBC_SET_PORT_CONFIG;
5252 /* Copy all bits to preserve original setting */
5253 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
5254 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5255 mcp->in_mb = MBX_0;
5256 mcp->tov = MBX_TOV_SECONDS;
5257 mcp->flags = 0;
5258 rval = qla2x00_mailbox_command(vha, mcp);
5260 if (rval != QLA_SUCCESS) {
5261 ql_dbg(ql_dbg_mbx, vha, 0x110d,
5262 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5263 } else
5264 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
5265 "Done %s.\n", __func__);
5267 return rval;
5272 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
5273 uint16_t *mb)
5275 int rval;
5276 mbx_cmd_t mc;
5277 mbx_cmd_t *mcp = &mc;
5278 struct qla_hw_data *ha = vha->hw;
5280 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
5281 "Entered %s.\n", __func__);
5283 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5284 return QLA_FUNCTION_FAILED;
5286 mcp->mb[0] = MBC_PORT_PARAMS;
5287 mcp->mb[1] = loop_id;
5288 if (ha->flags.fcp_prio_enabled)
5289 mcp->mb[2] = BIT_1;
5290 else
5291 mcp->mb[2] = BIT_2;
5292 mcp->mb[4] = priority & 0xf;
5293 mcp->mb[9] = vha->vp_idx;
5294 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5295 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5296 mcp->tov = 30;
5297 mcp->flags = 0;
5298 rval = qla2x00_mailbox_command(vha, mcp);
5299 if (mb != NULL) {
5300 mb[0] = mcp->mb[0];
5301 mb[1] = mcp->mb[1];
5302 mb[3] = mcp->mb[3];
5303 mb[4] = mcp->mb[4];
5306 if (rval != QLA_SUCCESS) {
5307 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
5308 } else {
5309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
5310 "Done %s.\n", __func__);
5313 return rval;
5317 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
5319 int rval = QLA_FUNCTION_FAILED;
5320 struct qla_hw_data *ha = vha->hw;
5321 uint8_t byte;
5323 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5324 ql_dbg(ql_dbg_mbx, vha, 0x1150,
5325 "Thermal not supported by this card.\n");
5326 return rval;
5329 if (IS_QLA25XX(ha)) {
5330 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5331 ha->pdev->subsystem_device == 0x0175) {
5332 rval = qla2x00_read_sfp(vha, 0, &byte,
5333 0x98, 0x1, 1, BIT_13|BIT_0);
5334 *temp = byte;
5335 return rval;
5337 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5338 ha->pdev->subsystem_device == 0x338e) {
5339 rval = qla2x00_read_sfp(vha, 0, &byte,
5340 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
5341 *temp = byte;
5342 return rval;
5344 ql_dbg(ql_dbg_mbx, vha, 0x10c9,
5345 "Thermal not supported by this card.\n");
5346 return rval;
5349 if (IS_QLA82XX(ha)) {
5350 *temp = qla82xx_read_temperature(vha);
5351 rval = QLA_SUCCESS;
5352 return rval;
5353 } else if (IS_QLA8044(ha)) {
5354 *temp = qla8044_read_temperature(vha);
5355 rval = QLA_SUCCESS;
5356 return rval;
5359 rval = qla2x00_read_asic_temperature(vha, temp);
5360 return rval;
5364 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
5366 int rval;
5367 struct qla_hw_data *ha = vha->hw;
5368 mbx_cmd_t mc;
5369 mbx_cmd_t *mcp = &mc;
5371 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
5372 "Entered %s.\n", __func__);
5374 if (!IS_FWI2_CAPABLE(ha))
5375 return QLA_FUNCTION_FAILED;
5377 memset(mcp, 0, sizeof(mbx_cmd_t));
5378 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5379 mcp->mb[1] = 1;
5381 mcp->out_mb = MBX_1|MBX_0;
5382 mcp->in_mb = MBX_0;
5383 mcp->tov = 30;
5384 mcp->flags = 0;
5386 rval = qla2x00_mailbox_command(vha, mcp);
5387 if (rval != QLA_SUCCESS) {
5388 ql_dbg(ql_dbg_mbx, vha, 0x1016,
5389 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5390 } else {
5391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
5392 "Done %s.\n", __func__);
5395 return rval;
5399 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
5401 int rval;
5402 struct qla_hw_data *ha = vha->hw;
5403 mbx_cmd_t mc;
5404 mbx_cmd_t *mcp = &mc;
5406 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
5407 "Entered %s.\n", __func__);
5409 if (!IS_P3P_TYPE(ha))
5410 return QLA_FUNCTION_FAILED;
5412 memset(mcp, 0, sizeof(mbx_cmd_t));
5413 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
5414 mcp->mb[1] = 0;
5416 mcp->out_mb = MBX_1|MBX_0;
5417 mcp->in_mb = MBX_0;
5418 mcp->tov = 30;
5419 mcp->flags = 0;
5421 rval = qla2x00_mailbox_command(vha, mcp);
5422 if (rval != QLA_SUCCESS) {
5423 ql_dbg(ql_dbg_mbx, vha, 0x100c,
5424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5425 } else {
5426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
5427 "Done %s.\n", __func__);
5430 return rval;
5434 qla82xx_md_get_template_size(scsi_qla_host_t *vha)
5436 struct qla_hw_data *ha = vha->hw;
5437 mbx_cmd_t mc;
5438 mbx_cmd_t *mcp = &mc;
5439 int rval = QLA_FUNCTION_FAILED;
5441 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
5442 "Entered %s.\n", __func__);
5444 memset(mcp->mb, 0 , sizeof(mcp->mb));
5445 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5446 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5447 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
5448 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
5450 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5451 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
5452 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5454 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5455 mcp->tov = MBX_TOV_SECONDS;
5456 rval = qla2x00_mailbox_command(vha, mcp);
5458 /* Always copy back return mailbox values. */
5459 if (rval != QLA_SUCCESS) {
5460 ql_dbg(ql_dbg_mbx, vha, 0x1120,
5461 "mailbox command FAILED=0x%x, subcode=%x.\n",
5462 (mcp->mb[1] << 16) | mcp->mb[0],
5463 (mcp->mb[3] << 16) | mcp->mb[2]);
5464 } else {
5465 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
5466 "Done %s.\n", __func__);
5467 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5468 if (!ha->md_template_size) {
5469 ql_dbg(ql_dbg_mbx, vha, 0x1122,
5470 "Null template size obtained.\n");
5471 rval = QLA_FUNCTION_FAILED;
5474 return rval;
5478 qla82xx_md_get_template(scsi_qla_host_t *vha)
5480 struct qla_hw_data *ha = vha->hw;
5481 mbx_cmd_t mc;
5482 mbx_cmd_t *mcp = &mc;
5483 int rval = QLA_FUNCTION_FAILED;
5485 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
5486 "Entered %s.\n", __func__);
5488 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5489 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5490 if (!ha->md_tmplt_hdr) {
5491 ql_log(ql_log_warn, vha, 0x1124,
5492 "Unable to allocate memory for Minidump template.\n");
5493 return rval;
5496 memset(mcp->mb, 0 , sizeof(mcp->mb));
5497 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5498 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5499 mcp->mb[2] = LSW(RQST_TMPLT);
5500 mcp->mb[3] = MSW(RQST_TMPLT);
5501 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5502 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5503 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5504 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5505 mcp->mb[8] = LSW(ha->md_template_size);
5506 mcp->mb[9] = MSW(ha->md_template_size);
5508 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5509 mcp->tov = MBX_TOV_SECONDS;
5510 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5511 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5512 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5513 rval = qla2x00_mailbox_command(vha, mcp);
5515 if (rval != QLA_SUCCESS) {
5516 ql_dbg(ql_dbg_mbx, vha, 0x1125,
5517 "mailbox command FAILED=0x%x, subcode=%x.\n",
5518 ((mcp->mb[1] << 16) | mcp->mb[0]),
5519 ((mcp->mb[3] << 16) | mcp->mb[2]));
5520 } else
5521 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
5522 "Done %s.\n", __func__);
5523 return rval;
5527 qla8044_md_get_template(scsi_qla_host_t *vha)
5529 struct qla_hw_data *ha = vha->hw;
5530 mbx_cmd_t mc;
5531 mbx_cmd_t *mcp = &mc;
5532 int rval = QLA_FUNCTION_FAILED;
5533 int offset = 0, size = MINIDUMP_SIZE_36K;
5534 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
5535 "Entered %s.\n", __func__);
5537 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5538 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5539 if (!ha->md_tmplt_hdr) {
5540 ql_log(ql_log_warn, vha, 0xb11b,
5541 "Unable to allocate memory for Minidump template.\n");
5542 return rval;
5545 memset(mcp->mb, 0 , sizeof(mcp->mb));
5546 while (offset < ha->md_template_size) {
5547 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5548 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
5549 mcp->mb[2] = LSW(RQST_TMPLT);
5550 mcp->mb[3] = MSW(RQST_TMPLT);
5551 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
5552 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
5553 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
5554 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
5555 mcp->mb[8] = LSW(size);
5556 mcp->mb[9] = MSW(size);
5557 mcp->mb[10] = offset & 0x0000FFFF;
5558 mcp->mb[11] = offset & 0xFFFF0000;
5559 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
5560 mcp->tov = MBX_TOV_SECONDS;
5561 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
5562 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5563 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5564 rval = qla2x00_mailbox_command(vha, mcp);
5566 if (rval != QLA_SUCCESS) {
5567 ql_dbg(ql_dbg_mbx, vha, 0xb11c,
5568 "mailbox command FAILED=0x%x, subcode=%x.\n",
5569 ((mcp->mb[1] << 16) | mcp->mb[0]),
5570 ((mcp->mb[3] << 16) | mcp->mb[2]));
5571 return rval;
5572 } else
5573 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
5574 "Done %s.\n", __func__);
5575 offset = offset + size;
5577 return rval;
5581 qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5583 int rval;
5584 struct qla_hw_data *ha = vha->hw;
5585 mbx_cmd_t mc;
5586 mbx_cmd_t *mcp = &mc;
5588 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5589 return QLA_FUNCTION_FAILED;
5591 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
5592 "Entered %s.\n", __func__);
5594 memset(mcp, 0, sizeof(mbx_cmd_t));
5595 mcp->mb[0] = MBC_SET_LED_CONFIG;
5596 mcp->mb[1] = led_cfg[0];
5597 mcp->mb[2] = led_cfg[1];
5598 if (IS_QLA8031(ha)) {
5599 mcp->mb[3] = led_cfg[2];
5600 mcp->mb[4] = led_cfg[3];
5601 mcp->mb[5] = led_cfg[4];
5602 mcp->mb[6] = led_cfg[5];
5605 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5606 if (IS_QLA8031(ha))
5607 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5608 mcp->in_mb = MBX_0;
5609 mcp->tov = 30;
5610 mcp->flags = 0;
5612 rval = qla2x00_mailbox_command(vha, mcp);
5613 if (rval != QLA_SUCCESS) {
5614 ql_dbg(ql_dbg_mbx, vha, 0x1134,
5615 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5616 } else {
5617 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
5618 "Done %s.\n", __func__);
5621 return rval;
5625 qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
5627 int rval;
5628 struct qla_hw_data *ha = vha->hw;
5629 mbx_cmd_t mc;
5630 mbx_cmd_t *mcp = &mc;
5632 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
5633 return QLA_FUNCTION_FAILED;
5635 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
5636 "Entered %s.\n", __func__);
5638 memset(mcp, 0, sizeof(mbx_cmd_t));
5639 mcp->mb[0] = MBC_GET_LED_CONFIG;
5641 mcp->out_mb = MBX_0;
5642 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5643 if (IS_QLA8031(ha))
5644 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
5645 mcp->tov = 30;
5646 mcp->flags = 0;
5648 rval = qla2x00_mailbox_command(vha, mcp);
5649 if (rval != QLA_SUCCESS) {
5650 ql_dbg(ql_dbg_mbx, vha, 0x1137,
5651 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5652 } else {
5653 led_cfg[0] = mcp->mb[1];
5654 led_cfg[1] = mcp->mb[2];
5655 if (IS_QLA8031(ha)) {
5656 led_cfg[2] = mcp->mb[3];
5657 led_cfg[3] = mcp->mb[4];
5658 led_cfg[4] = mcp->mb[5];
5659 led_cfg[5] = mcp->mb[6];
5661 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
5662 "Done %s.\n", __func__);
5665 return rval;
5669 qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
5671 int rval;
5672 struct qla_hw_data *ha = vha->hw;
5673 mbx_cmd_t mc;
5674 mbx_cmd_t *mcp = &mc;
5676 if (!IS_P3P_TYPE(ha))
5677 return QLA_FUNCTION_FAILED;
5679 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
5680 "Entered %s.\n", __func__);
5682 memset(mcp, 0, sizeof(mbx_cmd_t));
5683 mcp->mb[0] = MBC_SET_LED_CONFIG;
5684 if (enable)
5685 mcp->mb[7] = 0xE;
5686 else
5687 mcp->mb[7] = 0xD;
5689 mcp->out_mb = MBX_7|MBX_0;
5690 mcp->in_mb = MBX_0;
5691 mcp->tov = MBX_TOV_SECONDS;
5692 mcp->flags = 0;
5694 rval = qla2x00_mailbox_command(vha, mcp);
5695 if (rval != QLA_SUCCESS) {
5696 ql_dbg(ql_dbg_mbx, vha, 0x1128,
5697 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5698 } else {
5699 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
5700 "Done %s.\n", __func__);
5703 return rval;
5707 qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
5709 int rval;
5710 struct qla_hw_data *ha = vha->hw;
5711 mbx_cmd_t mc;
5712 mbx_cmd_t *mcp = &mc;
5714 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5715 return QLA_FUNCTION_FAILED;
5717 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
5718 "Entered %s.\n", __func__);
5720 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
5721 mcp->mb[1] = LSW(reg);
5722 mcp->mb[2] = MSW(reg);
5723 mcp->mb[3] = LSW(data);
5724 mcp->mb[4] = MSW(data);
5725 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
5727 mcp->in_mb = MBX_1|MBX_0;
5728 mcp->tov = MBX_TOV_SECONDS;
5729 mcp->flags = 0;
5730 rval = qla2x00_mailbox_command(vha, mcp);
5732 if (rval != QLA_SUCCESS) {
5733 ql_dbg(ql_dbg_mbx, vha, 0x1131,
5734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5735 } else {
5736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
5737 "Done %s.\n", __func__);
5740 return rval;
5744 qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
5746 int rval;
5747 struct qla_hw_data *ha = vha->hw;
5748 mbx_cmd_t mc;
5749 mbx_cmd_t *mcp = &mc;
5751 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5752 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
5753 "Implicit LOGO Unsupported.\n");
5754 return QLA_FUNCTION_FAILED;
5758 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
5759 "Entering %s.\n", __func__);
5761 /* Perform Implicit LOGO. */
5762 mcp->mb[0] = MBC_PORT_LOGOUT;
5763 mcp->mb[1] = fcport->loop_id;
5764 mcp->mb[10] = BIT_15;
5765 mcp->out_mb = MBX_10|MBX_1|MBX_0;
5766 mcp->in_mb = MBX_0;
5767 mcp->tov = MBX_TOV_SECONDS;
5768 mcp->flags = 0;
5769 rval = qla2x00_mailbox_command(vha, mcp);
5770 if (rval != QLA_SUCCESS)
5771 ql_dbg(ql_dbg_mbx, vha, 0x113d,
5772 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5773 else
5774 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
5775 "Done %s.\n", __func__);
5777 return rval;
5781 qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
5783 int rval;
5784 mbx_cmd_t mc;
5785 mbx_cmd_t *mcp = &mc;
5786 struct qla_hw_data *ha = vha->hw;
5787 unsigned long retry_max_time = jiffies + (2 * HZ);
5789 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5790 return QLA_FUNCTION_FAILED;
5792 ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
5794 retry_rd_reg:
5795 mcp->mb[0] = MBC_READ_REMOTE_REG;
5796 mcp->mb[1] = LSW(reg);
5797 mcp->mb[2] = MSW(reg);
5798 mcp->out_mb = MBX_2|MBX_1|MBX_0;
5799 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
5800 mcp->tov = MBX_TOV_SECONDS;
5801 mcp->flags = 0;
5802 rval = qla2x00_mailbox_command(vha, mcp);
5804 if (rval != QLA_SUCCESS) {
5805 ql_dbg(ql_dbg_mbx, vha, 0x114c,
5806 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5807 rval, mcp->mb[0], mcp->mb[1]);
5808 } else {
5809 *data = (mcp->mb[3] | (mcp->mb[4] << 16));
5810 if (*data == QLA8XXX_BAD_VALUE) {
5812 * During soft-reset CAMRAM register reads might
5813 * return 0xbad0bad0. So retry for MAX of 2 sec
5814 * while reading camram registers.
5816 if (time_after(jiffies, retry_max_time)) {
5817 ql_dbg(ql_dbg_mbx, vha, 0x1141,
5818 "Failure to read CAMRAM register. "
5819 "data=0x%x.\n", *data);
5820 return QLA_FUNCTION_FAILED;
5822 msleep(100);
5823 goto retry_rd_reg;
5825 ql_dbg(ql_dbg_mbx, vha, 0x1142, "Done %s.\n", __func__);
5828 return rval;
5832 qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5834 int rval;
5835 mbx_cmd_t mc;
5836 mbx_cmd_t *mcp = &mc;
5837 struct qla_hw_data *ha = vha->hw;
5839 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5840 return QLA_FUNCTION_FAILED;
5842 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
5844 mcp->mb[0] = MBC_RESTART_NIC_FIRMWARE;
5845 mcp->out_mb = MBX_0;
5846 mcp->in_mb = MBX_1|MBX_0;
5847 mcp->tov = MBX_TOV_SECONDS;
5848 mcp->flags = 0;
5849 rval = qla2x00_mailbox_command(vha, mcp);
5851 if (rval != QLA_SUCCESS) {
5852 ql_dbg(ql_dbg_mbx, vha, 0x1144,
5853 "Failed=%x mb[0]=%x mb[1]=%x.\n",
5854 rval, mcp->mb[0], mcp->mb[1]);
5855 ha->isp_ops->fw_dump(vha, 0);
5856 } else {
5857 ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__);
5860 return rval;
5864 qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options,
5865 uint32_t start_addr, uint32_t end_addr, uint16_t *sector_size)
5867 int rval;
5868 mbx_cmd_t mc;
5869 mbx_cmd_t *mcp = &mc;
5870 uint8_t subcode = (uint8_t)options;
5871 struct qla_hw_data *ha = vha->hw;
5873 if (!IS_QLA8031(ha))
5874 return QLA_FUNCTION_FAILED;
5876 ql_dbg(ql_dbg_mbx, vha, 0x1146, "Entered %s.\n", __func__);
5878 mcp->mb[0] = MBC_SET_ACCESS_CONTROL;
5879 mcp->mb[1] = options;
5880 mcp->out_mb = MBX_1|MBX_0;
5881 if (subcode & BIT_2) {
5882 mcp->mb[2] = LSW(start_addr);
5883 mcp->mb[3] = MSW(start_addr);
5884 mcp->mb[4] = LSW(end_addr);
5885 mcp->mb[5] = MSW(end_addr);
5886 mcp->out_mb |= MBX_5|MBX_4|MBX_3|MBX_2;
5888 mcp->in_mb = MBX_2|MBX_1|MBX_0;
5889 if (!(subcode & (BIT_2 | BIT_5)))
5890 mcp->in_mb |= MBX_4|MBX_3;
5891 mcp->tov = MBX_TOV_SECONDS;
5892 mcp->flags = 0;
5893 rval = qla2x00_mailbox_command(vha, mcp);
5895 if (rval != QLA_SUCCESS) {
5896 ql_dbg(ql_dbg_mbx, vha, 0x1147,
5897 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n",
5898 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3],
5899 mcp->mb[4]);
5900 ha->isp_ops->fw_dump(vha, 0);
5901 } else {
5902 if (subcode & BIT_5)
5903 *sector_size = mcp->mb[1];
5904 else if (subcode & (BIT_6 | BIT_7)) {
5905 ql_dbg(ql_dbg_mbx, vha, 0x1148,
5906 "Driver-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5907 } else if (subcode & (BIT_3 | BIT_4)) {
5908 ql_dbg(ql_dbg_mbx, vha, 0x1149,
5909 "Flash-lock id=%x%x", mcp->mb[4], mcp->mb[3]);
5911 ql_dbg(ql_dbg_mbx, vha, 0x114a, "Done %s.\n", __func__);
5914 return rval;
5918 qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
5919 uint32_t size)
5921 int rval;
5922 mbx_cmd_t mc;
5923 mbx_cmd_t *mcp = &mc;
5925 if (!IS_MCTP_CAPABLE(vha->hw))
5926 return QLA_FUNCTION_FAILED;
5928 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114f,
5929 "Entered %s.\n", __func__);
5931 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
5932 mcp->mb[1] = LSW(addr);
5933 mcp->mb[2] = MSW(req_dma);
5934 mcp->mb[3] = LSW(req_dma);
5935 mcp->mb[4] = MSW(size);
5936 mcp->mb[5] = LSW(size);
5937 mcp->mb[6] = MSW(MSD(req_dma));
5938 mcp->mb[7] = LSW(MSD(req_dma));
5939 mcp->mb[8] = MSW(addr);
5940 /* Setting RAM ID to valid */
5941 mcp->mb[10] |= BIT_7;
5942 /* For MCTP RAM ID is 0x40 */
5943 mcp->mb[10] |= 0x40;
5945 mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|
5946 MBX_0;
5948 mcp->in_mb = MBX_0;
5949 mcp->tov = MBX_TOV_SECONDS;
5950 mcp->flags = 0;
5951 rval = qla2x00_mailbox_command(vha, mcp);
5953 if (rval != QLA_SUCCESS) {
5954 ql_dbg(ql_dbg_mbx, vha, 0x114e,
5955 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
5956 } else {
5957 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x114d,
5958 "Done %s.\n", __func__);
5961 return rval;
5965 qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
5966 void *dd_buf, uint size, uint options)
5968 int rval;
5969 mbx_cmd_t mc;
5970 mbx_cmd_t *mcp = &mc;
5971 dma_addr_t dd_dma;
5973 if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
5974 return QLA_FUNCTION_FAILED;
5976 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
5977 "Entered %s.\n", __func__);
5979 dd_dma = dma_map_single(&vha->hw->pdev->dev,
5980 dd_buf, size, DMA_FROM_DEVICE);
5981 if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) {
5982 ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n");
5983 return QLA_MEMORY_ALLOC_FAILED;
5986 memset(dd_buf, 0, size);
5988 mcp->mb[0] = MBC_DPORT_DIAGNOSTICS;
5989 mcp->mb[1] = options;
5990 mcp->mb[2] = MSW(LSD(dd_dma));
5991 mcp->mb[3] = LSW(LSD(dd_dma));
5992 mcp->mb[6] = MSW(MSD(dd_dma));
5993 mcp->mb[7] = LSW(MSD(dd_dma));
5994 mcp->mb[8] = size;
5995 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
5996 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
5997 mcp->buf_size = size;
5998 mcp->flags = MBX_DMA_IN;
5999 mcp->tov = MBX_TOV_SECONDS * 4;
6000 rval = qla2x00_mailbox_command(vha, mcp);
6002 if (rval != QLA_SUCCESS) {
6003 ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval);
6004 } else {
6005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196,
6006 "Done %s.\n", __func__);
6009 dma_unmap_single(&vha->hw->pdev->dev, dd_dma,
6010 size, DMA_FROM_DEVICE);
6012 return rval;
6015 static void qla2x00_async_mb_sp_done(void *s, int res)
6017 struct srb *sp = s;
6019 sp->u.iocb_cmd.u.mbx.rc = res;
6021 complete(&sp->u.iocb_cmd.u.mbx.comp);
6022 /* don't free sp here. Let the caller do the free */
6026 * This mailbox uses the iocb interface to send MB command.
6027 * This allows non-critial (non chip setup) command to go
6028 * out in parrallel.
6030 int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
6032 int rval = QLA_FUNCTION_FAILED;
6033 srb_t *sp;
6034 struct srb_iocb *c;
6036 if (!vha->hw->flags.fw_started)
6037 goto done;
6039 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
6040 if (!sp)
6041 goto done;
6043 sp->type = SRB_MB_IOCB;
6044 sp->name = mb_to_str(mcp->mb[0]);
6046 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
6048 memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
6050 c = &sp->u.iocb_cmd;
6051 c->timeout = qla2x00_async_iocb_timeout;
6052 init_completion(&c->u.mbx.comp);
6054 sp->done = qla2x00_async_mb_sp_done;
6056 rval = qla2x00_start_sp(sp);
6057 if (rval != QLA_SUCCESS) {
6058 ql_dbg(ql_dbg_mbx, vha, 0x1018,
6059 "%s: %s Failed submission. %x.\n",
6060 __func__, sp->name, rval);
6061 goto done_free_sp;
6064 ql_dbg(ql_dbg_mbx, vha, 0x113f, "MB:%s hndl %x submitted\n",
6065 sp->name, sp->handle);
6067 wait_for_completion(&c->u.mbx.comp);
6068 memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
6070 rval = c->u.mbx.rc;
6071 switch (rval) {
6072 case QLA_FUNCTION_TIMEOUT:
6073 ql_dbg(ql_dbg_mbx, vha, 0x1140, "%s: %s Timeout. %x.\n",
6074 __func__, sp->name, rval);
6075 break;
6076 case QLA_SUCCESS:
6077 ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n",
6078 __func__, sp->name);
6079 sp->free(sp);
6080 break;
6081 default:
6082 ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n",
6083 __func__, sp->name, rval);
6084 sp->free(sp);
6085 break;
6088 return rval;
6090 done_free_sp:
6091 sp->free(sp);
6092 done:
6093 return rval;
6097 * qla24xx_gpdb_wait
6098 * NOTE: Do not call this routine from DPC thread
6100 int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
6102 int rval = QLA_FUNCTION_FAILED;
6103 dma_addr_t pd_dma;
6104 struct port_database_24xx *pd;
6105 struct qla_hw_data *ha = vha->hw;
6106 mbx_cmd_t mc;
6108 if (!vha->hw->flags.fw_started)
6109 goto done;
6111 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6112 if (pd == NULL) {
6113 ql_log(ql_log_warn, vha, 0xd047,
6114 "Failed to allocate port database structure.\n");
6115 goto done_free_sp;
6118 memset(&mc, 0, sizeof(mc));
6119 mc.mb[0] = MBC_GET_PORT_DATABASE;
6120 mc.mb[1] = cpu_to_le16(fcport->loop_id);
6121 mc.mb[2] = MSW(pd_dma);
6122 mc.mb[3] = LSW(pd_dma);
6123 mc.mb[6] = MSW(MSD(pd_dma));
6124 mc.mb[7] = LSW(MSD(pd_dma));
6125 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6126 mc.mb[10] = cpu_to_le16((uint16_t)opt);
6128 rval = qla24xx_send_mb_cmd(vha, &mc);
6129 if (rval != QLA_SUCCESS) {
6130 ql_dbg(ql_dbg_mbx, vha, 0x1193,
6131 "%s: %8phC fail\n", __func__, fcport->port_name);
6132 goto done_free_sp;
6135 rval = __qla24xx_parse_gpdb(vha, fcport, pd);
6137 ql_dbg(ql_dbg_mbx, vha, 0x1197, "%s: %8phC done\n",
6138 __func__, fcport->port_name);
6140 done_free_sp:
6141 if (pd)
6142 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6143 done:
6144 return rval;
6147 int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
6148 struct port_database_24xx *pd)
6150 int rval = QLA_SUCCESS;
6151 uint64_t zero = 0;
6152 u8 current_login_state, last_login_state;
6154 if (fcport->fc4f_nvme) {
6155 current_login_state = pd->current_login_state >> 4;
6156 last_login_state = pd->last_login_state >> 4;
6157 } else {
6158 current_login_state = pd->current_login_state & 0xf;
6159 last_login_state = pd->last_login_state & 0xf;
6162 /* Check for logged in state. */
6163 if (current_login_state != PDS_PRLI_COMPLETE &&
6164 last_login_state != PDS_PRLI_COMPLETE) {
6165 ql_dbg(ql_dbg_mbx, vha, 0x119a,
6166 "Unable to verify login-state (%x/%x) for loop_id %x.\n",
6167 current_login_state, last_login_state, fcport->loop_id);
6168 rval = QLA_FUNCTION_FAILED;
6169 goto gpd_error_out;
6172 if (fcport->loop_id == FC_NO_LOOP_ID ||
6173 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
6174 memcmp(fcport->port_name, pd->port_name, 8))) {
6175 /* We lost the device mid way. */
6176 rval = QLA_NOT_LOGGED_IN;
6177 goto gpd_error_out;
6180 /* Names are little-endian. */
6181 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
6182 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
6184 /* Get port_id of device. */
6185 fcport->d_id.b.domain = pd->port_id[0];
6186 fcport->d_id.b.area = pd->port_id[1];
6187 fcport->d_id.b.al_pa = pd->port_id[2];
6188 fcport->d_id.b.rsvd_1 = 0;
6190 if (fcport->fc4f_nvme) {
6191 fcport->nvme_prli_service_param =
6192 pd->prli_nvme_svc_param_word_3;
6193 fcport->port_type = FCT_NVME;
6194 } else {
6195 /* If not target must be initiator or unknown type. */
6196 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
6197 fcport->port_type = FCT_INITIATOR;
6198 else
6199 fcport->port_type = FCT_TARGET;
6201 /* Passback COS information. */
6202 fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
6203 FC_COS_CLASS2 : FC_COS_CLASS3;
6205 if (pd->prli_svc_param_word_3[0] & BIT_7) {
6206 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
6207 fcport->conf_compl_supported = 1;
6210 gpd_error_out:
6211 return rval;
6215 * qla24xx_gidlist__wait
6216 * NOTE: don't call this routine from DPC thread.
6218 int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
6219 void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
6221 int rval = QLA_FUNCTION_FAILED;
6222 mbx_cmd_t mc;
6224 if (!vha->hw->flags.fw_started)
6225 goto done;
6227 memset(&mc, 0, sizeof(mc));
6228 mc.mb[0] = MBC_GET_ID_LIST;
6229 mc.mb[2] = MSW(id_list_dma);
6230 mc.mb[3] = LSW(id_list_dma);
6231 mc.mb[6] = MSW(MSD(id_list_dma));
6232 mc.mb[7] = LSW(MSD(id_list_dma));
6233 mc.mb[8] = 0;
6234 mc.mb[9] = cpu_to_le16(vha->vp_idx);
6236 rval = qla24xx_send_mb_cmd(vha, &mc);
6237 if (rval != QLA_SUCCESS) {
6238 ql_dbg(ql_dbg_mbx, vha, 0x119b,
6239 "%s: fail\n", __func__);
6240 } else {
6241 *entries = mc.mb[1];
6242 ql_dbg(ql_dbg_mbx, vha, 0x119c,
6243 "%s: done\n", __func__);
6245 done:
6246 return rval;
6249 int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
6251 int rval;
6252 mbx_cmd_t mc;
6253 mbx_cmd_t *mcp = &mc;
6255 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
6256 "Entered %s\n", __func__);
6258 memset(mcp->mb, 0 , sizeof(mcp->mb));
6259 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6260 mcp->mb[1] = cpu_to_le16(1);
6261 mcp->mb[2] = cpu_to_le16(value);
6262 mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
6263 mcp->in_mb = MBX_2 | MBX_0;
6264 mcp->tov = MBX_TOV_SECONDS;
6265 mcp->flags = 0;
6267 rval = qla2x00_mailbox_command(vha, mcp);
6269 ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
6270 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6272 return rval;
6275 int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
6277 int rval;
6278 mbx_cmd_t mc;
6279 mbx_cmd_t *mcp = &mc;
6281 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
6282 "Entered %s\n", __func__);
6284 memset(mcp->mb, 0, sizeof(mcp->mb));
6285 mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
6286 mcp->mb[1] = cpu_to_le16(0);
6287 mcp->out_mb = MBX_1 | MBX_0;
6288 mcp->in_mb = MBX_2 | MBX_0;
6289 mcp->tov = MBX_TOV_SECONDS;
6290 mcp->flags = 0;
6292 rval = qla2x00_mailbox_command(vha, mcp);
6293 if (rval == QLA_SUCCESS)
6294 *value = mc.mb[2];
6296 ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
6297 (rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
6299 return rval;
6303 qla2x00_read_sfp_dev(struct scsi_qla_host *vha, char *buf, int count)
6305 struct qla_hw_data *ha = vha->hw;
6306 uint16_t iter, addr, offset;
6307 dma_addr_t phys_addr;
6308 int rval, c;
6309 u8 *sfp_data;
6311 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6312 addr = 0xa0;
6313 phys_addr = ha->sfp_data_dma;
6314 sfp_data = ha->sfp_data;
6315 offset = c = 0;
6317 for (iter = 0; iter < SFP_DEV_SIZE / SFP_BLOCK_SIZE; iter++) {
6318 if (iter == 4) {
6319 /* Skip to next device address. */
6320 addr = 0xa2;
6321 offset = 0;
6324 rval = qla2x00_read_sfp(vha, phys_addr, sfp_data,
6325 addr, offset, SFP_BLOCK_SIZE, BIT_1);
6326 if (rval != QLA_SUCCESS) {
6327 ql_log(ql_log_warn, vha, 0x706d,
6328 "Unable to read SFP data (%x/%x/%x).\n", rval,
6329 addr, offset);
6331 return rval;
6334 if (buf && (c < count)) {
6335 u16 sz;
6337 if ((count - c) >= SFP_BLOCK_SIZE)
6338 sz = SFP_BLOCK_SIZE;
6339 else
6340 sz = count - c;
6342 memcpy(buf, sfp_data, sz);
6343 buf += SFP_BLOCK_SIZE;
6344 c += sz;
6346 phys_addr += SFP_BLOCK_SIZE;
6347 sfp_data += SFP_BLOCK_SIZE;
6348 offset += SFP_BLOCK_SIZE;
6351 return rval;