/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/gfp.h>

/*
 * qla2x00_mailbox_command
 *	Issue mailbox command and wait for completion.
 *
 * Input:
 *	ha = adapter block pointer.
 *	mcp = driver internal mbx struct pointer.
 *
 * Output:
 *	mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
 *
 * Returns:
 *	0 : QLA_SUCCESS (cmd performed successfully)
 *	1 : QLA_FUNCTION_FAILED (error encountered)
 *	6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
 *
 * Context:
 *	Kernel context.
 */
static int
qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
{
35 int rval;
36 unsigned long flags = 0;
37 device_reg_t __iomem *reg;
38 uint8_t abort_active;
39 uint8_t io_lock_on;
40 uint16_t command = 0;
41 uint16_t *iptr;
42 uint16_t __iomem *optr;
43 uint32_t cnt;
44 uint32_t mboxes;
45 unsigned long wait_time;
46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__);
51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n");
55 return QLA_FUNCTION_TIMEOUT;
58 if (vha->device_flags & DFLG_DEV_FAILED) {
59 ql_log(ql_log_warn, base_vha, 0x1002,
60 "Device in failed state, exiting.\n");
61 return QLA_FUNCTION_TIMEOUT;
64 reg = ha->iobase;
65 io_lock_on = base_vha->flags.init_done;
67 rval = QLA_SUCCESS;
68 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
71 if (ha->flags.pci_channel_io_perm_failure) {
72 ql_log(ql_log_warn, base_vha, 0x1003,
73 "Perm failure on EEH timeout MBX, exiting.\n");
74 return QLA_FUNCTION_TIMEOUT;
77 if (ha->flags.isp82xx_fw_hung) {
78 /* Setting Link-Down error */
79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
82 rval = QLA_FUNCTION_FAILED;
83 goto premature_exit;
87 * Wait for active mailbox commands to finish by waiting at most tov
88 * seconds. This is to serialize actual issuing of mailbox cmds during
89 * non ISP abort time.
91 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
92 /* Timeout occurred. Return error. */
93 ql_log(ql_log_warn, base_vha, 0x1005,
94 "Cmd access timeout, Exiting.\n");
95 return QLA_FUNCTION_TIMEOUT;
98 ha->flags.mbox_busy = 1;
99 /* Save mailbox command for debug */
100 ha->mcp = mcp;
102 ql_dbg(ql_dbg_mbx, base_vha, 0x1006,
103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
105 spin_lock_irqsave(&ha->hardware_lock, flags);
107 /* Load mailbox registers. */
108 if (IS_QLA82XX(ha))
109 optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
110 else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
111 optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
112 else
113 optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
115 iptr = mcp->mb;
116 command = mcp->mb[0];
117 mboxes = mcp->out_mb;
119 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
120 if (IS_QLA2200(ha) && cnt == 8)
121 optr =
122 (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
123 if (mboxes & BIT_0)
124 WRT_REG_WORD(optr, *iptr);
126 mboxes >>= 1;
127 optr++;
128 iptr++;
131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111,
132 "Loaded MBX registers (displayed in bytes) =.\n");
133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112,
134 (uint8_t *)mcp->mb, 16);
135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113,
136 ".\n");
137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114,
138 ((uint8_t *)mcp->mb + 0x10), 16);
139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115,
140 ".\n");
141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116,
142 ((uint8_t *)mcp->mb + 0x20), 8);
143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117,
144 "I/O Address = %p.\n", optr);
145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e);
147 /* Issue set host interrupt command to send cmd out. */
148 ha->flags.mbox_int = 0;
149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
151 /* Unlock mbx registers and wait for interrupt */
152 ql_dbg(ql_dbg_mbx, base_vha, 0x100f,
153 "Going to unlock irq & waiting for interrupts. "
154 "jiffies=%lx.\n", jiffies);
156 /* Wait for mbx cmd completion until timeout */
158 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
159 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
161 if (IS_QLA82XX(ha)) {
162 if (RD_REG_DWORD(&reg->isp82.hint) &
163 HINT_MBX_INT_PENDING) {
164 spin_unlock_irqrestore(&ha->hardware_lock,
165 flags);
166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
167 "Pending mailbox timeout, exiting.\n");
168 rval = QLA_FUNCTION_TIMEOUT;
169 goto premature_exit;
171 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
172 } else if (IS_FWI2_CAPABLE(ha))
173 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
174 else
175 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
176 spin_unlock_irqrestore(&ha->hardware_lock, flags);
178 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
182 } else {
183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011,
184 "Cmd=%x Polling Mode.\n", command);
186 if (IS_QLA82XX(ha)) {
187 if (RD_REG_DWORD(&reg->isp82.hint) &
188 HINT_MBX_INT_PENDING) {
189 spin_unlock_irqrestore(&ha->hardware_lock,
190 flags);
191 ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
192 "Pending mailbox timeout, exiting.\n");
193 rval = QLA_FUNCTION_TIMEOUT;
194 goto premature_exit;
196 WRT_REG_DWORD(&reg->isp82.hint, HINT_MBX_INT_PENDING);
197 } else if (IS_FWI2_CAPABLE(ha))
198 WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_SET_HOST_INT);
199 else
200 WRT_REG_WORD(&reg->isp.hccr, HCCR_SET_HOST_INT);
201 spin_unlock_irqrestore(&ha->hardware_lock, flags);
203 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
204 while (!ha->flags.mbox_int) {
205 if (time_after(jiffies, wait_time))
206 break;
208 /* Check for pending interrupts. */
209 qla2x00_poll(ha->rsp_q_map[0]);
211 if (!ha->flags.mbox_int &&
212 !(IS_QLA2200(ha) &&
213 command == MBC_LOAD_RISC_RAM_EXTENDED))
214 msleep(10);
215 } /* while */
216 ql_dbg(ql_dbg_mbx, base_vha, 0x1013,
217 "Waited %d sec.\n",
218 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
221 /* Check whether we timed out */
222 if (ha->flags.mbox_int) {
223 uint16_t *iptr2;
225 ql_dbg(ql_dbg_mbx, base_vha, 0x1014,
226 "Cmd=%x completed.\n", command);
228 /* Got interrupt. Clear the flag. */
229 ha->flags.mbox_int = 0;
230 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
232 if (ha->flags.isp82xx_fw_hung) {
233 ha->flags.mbox_busy = 0;
234 /* Setting Link-Down error */
235 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
236 ha->mcp = NULL;
237 rval = QLA_FUNCTION_FAILED;
238 ql_log(ql_log_warn, base_vha, 0x1015,
239 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
240 goto premature_exit;
243 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE)
244 rval = QLA_FUNCTION_FAILED;
246 /* Load return mailbox registers. */
247 iptr2 = mcp->mb;
248 iptr = (uint16_t *)&ha->mailbox_out[0];
249 mboxes = mcp->in_mb;
250 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
251 if (mboxes & BIT_0)
252 *iptr2 = *iptr;
254 mboxes >>= 1;
255 iptr2++;
256 iptr++;
258 } else {
260 uint16_t mb0;
261 uint32_t ictrl;
263 if (IS_FWI2_CAPABLE(ha)) {
264 mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
265 ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
266 } else {
267 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
268 ictrl = RD_REG_WORD(&reg->isp.ictrl);
270 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119,
271 "MBX Command timeout for cmd %x.\n", command);
272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a,
273 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b,
275 "mb[0] = 0x%x.\n", mb0);
276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019);
278 rval = QLA_FUNCTION_TIMEOUT;
281 ha->flags.mbox_busy = 0;
283 /* Clean up */
284 ha->mcp = NULL;
286 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
287 ql_dbg(ql_dbg_mbx, base_vha, 0x101a,
288 "Checking for additional resp interrupt.\n");
290 /* polling mode for non isp_abort commands. */
291 qla2x00_poll(ha->rsp_q_map[0]);
294 if (rval == QLA_FUNCTION_TIMEOUT &&
295 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
296 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
297 ha->flags.eeh_busy) {
298 /* not in dpc. schedule it for dpc to take over. */
299 ql_dbg(ql_dbg_mbx, base_vha, 0x101b,
300 "Timeout, schedule isp_abort_needed.\n");
302 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
303 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
304 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
306 ql_log(ql_log_info, base_vha, 0x101c,
				    "Mailbox cmd timeout occurred. "
308 "Scheduling ISP abort eeh_busy=0x%x.\n",
309 ha->flags.eeh_busy);
310 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
311 qla2xxx_wake_dpc(vha);
313 } else if (!abort_active) {
314 /* call abort directly since we are in the DPC thread */
315 ql_dbg(ql_dbg_mbx, base_vha, 0x101d,
316 "Timeout, calling abort_isp.\n");
318 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
319 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
320 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
322 ql_log(ql_log_info, base_vha, 0x101e,
				    "Mailbox cmd timeout occurred. "
324 "Scheduling ISP abort.\n");
326 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
327 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
328 if (ha->isp_ops->abort_isp(vha)) {
329 /* Failed. retry later. */
330 set_bit(ISP_ABORT_NEEDED,
331 &vha->dpc_flags);
333 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
334 ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
335 "Finished abort_isp.\n");
340 premature_exit:
341 /* Allow next mbx cmd to come in. */
342 complete(&ha->mbx_cmd_comp);
344 if (rval) {
345 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
346 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
347 mcp->mb[0], mcp->mb[1], mcp->mb[2], command);
	} else {
		ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
	}

	return rval;
}
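
/*
 * Usage sketch (illustrative only, not part of the driver): every wrapper
 * below drives this routine the same way -- fill an on-stack mbx_cmd_t,
 * mark which mailbox registers are loaded (out_mb) and read back (in_mb),
 * then check the return value and mcp->mb[].  Mirroring
 * qla2x00_get_firmware_state() further down, a minimal caller looks like:
 *
 *	int rval;
 *	uint16_t state;
 *	mbx_cmd_t mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;	// opcode always goes in mb[0]
 *	mcp->out_mb = MBX_0;			// registers written to firmware
 *	mcp->in_mb = MBX_1|MBX_0;		// registers copied back on completion
 *	mcp->tov = MBX_TOV_SECONDS;		// per-command timeout in seconds
 *	mcp->flags = 0;
 *	rval = qla2x00_mailbox_command(vha, mcp);
 *	if (rval == QLA_SUCCESS)
 *		state = mcp->mb[1];		// returned mailbox data
 */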

int
qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
    uint32_t risc_code_size)
{
359 int rval;
360 struct qla_hw_data *ha = vha->hw;
361 mbx_cmd_t mc;
362 mbx_cmd_t *mcp = &mc;
364 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
366 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
367 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
368 mcp->mb[8] = MSW(risc_addr);
369 mcp->out_mb = MBX_8|MBX_0;
370 } else {
371 mcp->mb[0] = MBC_LOAD_RISC_RAM;
372 mcp->out_mb = MBX_0;
374 mcp->mb[1] = LSW(risc_addr);
375 mcp->mb[2] = MSW(req_dma);
376 mcp->mb[3] = LSW(req_dma);
377 mcp->mb[6] = MSW(MSD(req_dma));
378 mcp->mb[7] = LSW(MSD(req_dma));
379 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
380 if (IS_FWI2_CAPABLE(ha)) {
381 mcp->mb[4] = MSW(risc_code_size);
382 mcp->mb[5] = LSW(risc_code_size);
383 mcp->out_mb |= MBX_5|MBX_4;
384 } else {
385 mcp->mb[4] = LSW(risc_code_size);
386 mcp->out_mb |= MBX_4;
389 mcp->in_mb = MBX_0;
390 mcp->tov = MBX_TOV_SECONDS;
391 mcp->flags = 0;
392 rval = qla2x00_mailbox_command(vha, mcp);
394 if (rval != QLA_SUCCESS) {
395 ql_dbg(ql_dbg_mbx, vha, 0x1023,
396 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
	} else {
		ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
	}

	return rval;
}
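
/*
 * Note on the mailbox layout above (sketch, hypothetical value): a 64-bit
 * DMA address is spread across four 16-bit mailboxes with the LSW/MSW/MSD
 * helpers used throughout this file.  For req_dma = 0x000000123456789a:
 *
 *	mcp->mb[3] = LSW(req_dma);		// 0x789a, bits 15:0
 *	mcp->mb[2] = MSW(req_dma);		// 0x3456, bits 31:16
 *	mcp->mb[7] = LSW(MSD(req_dma));		// 0x0012, bits 47:32
 *	mcp->mb[6] = MSW(MSD(req_dma));		// 0x0000, bits 63:48
 */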

#define EXTENDED_BB_CREDITS	BIT_0
/*
 * qla2x00_execute_fw
 *	Start adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
{
423 int rval;
424 struct qla_hw_data *ha = vha->hw;
425 mbx_cmd_t mc;
426 mbx_cmd_t *mcp = &mc;
428 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
430 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
431 mcp->out_mb = MBX_0;
432 mcp->in_mb = MBX_0;
433 if (IS_FWI2_CAPABLE(ha)) {
434 mcp->mb[1] = MSW(risc_addr);
435 mcp->mb[2] = LSW(risc_addr);
436 mcp->mb[3] = 0;
437 if (IS_QLA81XX(ha)) {
438 struct nvram_81xx *nv = ha->nvram;
439 mcp->mb[4] = (nv->enhanced_features &
440 EXTENDED_BB_CREDITS);
441 } else
442 mcp->mb[4] = 0;
443 mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
444 mcp->in_mb |= MBX_1;
445 } else {
446 mcp->mb[1] = LSW(risc_addr);
447 mcp->out_mb |= MBX_1;
448 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
449 mcp->mb[2] = 0;
450 mcp->out_mb |= MBX_2;
454 mcp->tov = MBX_TOV_SECONDS;
455 mcp->flags = 0;
456 rval = qla2x00_mailbox_command(vha, mcp);
458 if (rval != QLA_SUCCESS) {
459 ql_dbg(ql_dbg_mbx, vha, 0x1026,
460 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
461 } else {
462 if (IS_FWI2_CAPABLE(ha)) {
463 ql_dbg(ql_dbg_mbx, vha, 0x1027,
464 "Done exchanges=%x.\n", mcp->mb[1]);
465 } else {
466 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
470 return rval;
}

/*
 * qla2x00_get_fw_version
 *	Get firmware version.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	major:		pointer for major number.
 *	minor:		pointer for minor number.
 *	subminor:	pointer for subminor number.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
    uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
    uint32_t *mpi_caps, uint8_t *phy)
{
494 int rval;
495 mbx_cmd_t mc;
496 mbx_cmd_t *mcp = &mc;
498 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
500 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
501 mcp->out_mb = MBX_0;
502 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
503 if (IS_QLA81XX(vha->hw))
504 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
505 mcp->flags = 0;
506 mcp->tov = MBX_TOV_SECONDS;
507 rval = qla2x00_mailbox_command(vha, mcp);
508 if (rval != QLA_SUCCESS)
509 goto failed;
511 /* Return mailbox data. */
512 *major = mcp->mb[1];
513 *minor = mcp->mb[2];
514 *subminor = mcp->mb[3];
515 *attributes = mcp->mb[6];
516 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
517 *memory = 0x1FFFF; /* Defaults to 128KB. */
518 else
519 *memory = (mcp->mb[5] << 16) | mcp->mb[4];
520 if (IS_QLA81XX(vha->hw)) {
521 mpi[0] = mcp->mb[10] & 0xff;
522 mpi[1] = mcp->mb[11] >> 8;
523 mpi[2] = mcp->mb[11] & 0xff;
524 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13];
525 phy[0] = mcp->mb[8] & 0xff;
526 phy[1] = mcp->mb[9] >> 8;
527 phy[2] = mcp->mb[9] & 0xff;
529 failed:
530 if (rval != QLA_SUCCESS) {
531 /*EMPTY*/
532 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
	} else {
		/*EMPTY*/
		ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
	}

	return rval;
}
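
/*
 * Worked example (hypothetical register values): if the firmware returns
 * mb[1]=1, mb[2]=2, mb[3]=3 and mb[4]/mb[5] = 0x5000/0x0002, the caller sees
 *
 *	*major = 1, *minor = 2, *subminor = 3
 *	*memory = (0x0002 << 16) | 0x5000 = 0x25000
 *
 * On ISP81xx parts the MPI and PHY versions arrive packed one byte per
 * half-word in mb[8]-mb[13], hence the 0xff masks and >> 8 shifts above.
 */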

/*
 * qla2x00_get_fw_options
 *	Get firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
557 int rval;
558 mbx_cmd_t mc;
559 mbx_cmd_t *mcp = &mc;
561 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
563 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
564 mcp->out_mb = MBX_0;
565 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
566 mcp->tov = MBX_TOV_SECONDS;
567 mcp->flags = 0;
568 rval = qla2x00_mailbox_command(vha, mcp);
570 if (rval != QLA_SUCCESS) {
571 /*EMPTY*/
572 ql_dbg(ql_dbg_mbx, vha, 0x102d, "Failed=%x.\n", rval);
573 } else {
574 fwopts[0] = mcp->mb[0];
575 fwopts[1] = mcp->mb[1];
576 fwopts[2] = mcp->mb[2];
577 fwopts[3] = mcp->mb[3];
579 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
582 return rval;
}

/*
 * qla2x00_set_fw_options
 *	Set firmware options.
 *
 * Input:
 *	ha = adapter block pointer.
 *	fwopt = pointer for firmware options.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
{
603 int rval;
604 mbx_cmd_t mc;
605 mbx_cmd_t *mcp = &mc;
607 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
609 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
610 mcp->mb[1] = fwopts[1];
611 mcp->mb[2] = fwopts[2];
612 mcp->mb[3] = fwopts[3];
613 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
614 mcp->in_mb = MBX_0;
615 if (IS_FWI2_CAPABLE(vha->hw)) {
616 mcp->in_mb |= MBX_1;
617 } else {
618 mcp->mb[10] = fwopts[10];
619 mcp->mb[11] = fwopts[11];
620 mcp->mb[12] = 0; /* Undocumented, but used */
621 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
623 mcp->tov = MBX_TOV_SECONDS;
624 mcp->flags = 0;
625 rval = qla2x00_mailbox_command(vha, mcp);
627 fwopts[0] = mcp->mb[0];
629 if (rval != QLA_SUCCESS) {
630 /*EMPTY*/
631 ql_dbg(ql_dbg_mbx, vha, 0x1030,
632 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
633 } else {
634 /*EMPTY*/
635 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
638 return rval;
}

/*
 * qla2x00_mbx_reg_test
 *	Mailbox register wrap test.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
{
659 int rval;
660 mbx_cmd_t mc;
661 mbx_cmd_t *mcp = &mc;
663 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
665 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
666 mcp->mb[1] = 0xAAAA;
667 mcp->mb[2] = 0x5555;
668 mcp->mb[3] = 0xAA55;
669 mcp->mb[4] = 0x55AA;
670 mcp->mb[5] = 0xA5A5;
671 mcp->mb[6] = 0x5A5A;
672 mcp->mb[7] = 0x2525;
673 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
674 mcp->in_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
675 mcp->tov = MBX_TOV_SECONDS;
676 mcp->flags = 0;
677 rval = qla2x00_mailbox_command(vha, mcp);
679 if (rval == QLA_SUCCESS) {
680 if (mcp->mb[1] != 0xAAAA || mcp->mb[2] != 0x5555 ||
681 mcp->mb[3] != 0xAA55 || mcp->mb[4] != 0x55AA)
682 rval = QLA_FUNCTION_FAILED;
683 if (mcp->mb[5] != 0xA5A5 || mcp->mb[6] != 0x5A5A ||
684 mcp->mb[7] != 0x2525)
685 rval = QLA_FUNCTION_FAILED;
688 if (rval != QLA_SUCCESS) {
689 /*EMPTY*/
690 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
691 } else {
692 /*EMPTY*/
693 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
696 return rval;
}

/*
 * qla2x00_verify_checksum
 *	Verify firmware checksum.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
{
717 int rval;
718 mbx_cmd_t mc;
719 mbx_cmd_t *mcp = &mc;
721 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
723 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
724 mcp->out_mb = MBX_0;
725 mcp->in_mb = MBX_0;
726 if (IS_FWI2_CAPABLE(vha->hw)) {
727 mcp->mb[1] = MSW(risc_addr);
728 mcp->mb[2] = LSW(risc_addr);
729 mcp->out_mb |= MBX_2|MBX_1;
730 mcp->in_mb |= MBX_2|MBX_1;
731 } else {
732 mcp->mb[1] = LSW(risc_addr);
733 mcp->out_mb |= MBX_1;
734 mcp->in_mb |= MBX_1;
737 mcp->tov = MBX_TOV_SECONDS;
738 mcp->flags = 0;
739 rval = qla2x00_mailbox_command(vha, mcp);
741 if (rval != QLA_SUCCESS) {
742 ql_dbg(ql_dbg_mbx, vha, 0x1036,
743 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
744 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
745 } else {
746 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
749 return rval;
}

/*
 * qla2x00_issue_iocb
 *	Issue IOCB using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	buffer = buffer pointer.
 *	phys_addr = physical address of buffer.
 *	size = size of buffer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
    dma_addr_t phys_addr, size_t size, uint32_t tov)
{
774 int rval;
775 mbx_cmd_t mc;
776 mbx_cmd_t *mcp = &mc;
778 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
780 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
781 mcp->mb[1] = 0;
782 mcp->mb[2] = MSW(phys_addr);
783 mcp->mb[3] = LSW(phys_addr);
784 mcp->mb[6] = MSW(MSD(phys_addr));
785 mcp->mb[7] = LSW(MSD(phys_addr));
786 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
787 mcp->in_mb = MBX_2|MBX_0;
788 mcp->tov = tov;
789 mcp->flags = 0;
790 rval = qla2x00_mailbox_command(vha, mcp);
792 if (rval != QLA_SUCCESS) {
793 /*EMPTY*/
794 ql_dbg(ql_dbg_mbx, vha, 0x1039, "Failed=%x.\n", rval);
795 } else {
796 sts_entry_t *sts_entry = (sts_entry_t *) buffer;
798 /* Mask reserved bits. */
799 sts_entry->entry_status &=
800 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
801 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
804 return rval;
}

int
qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
    size_t size)
{
	return qla2x00_issue_iocb_timeout(vha, buffer, phys_addr, size,
	    MBX_TOV_SECONDS);
}

/*
 * qla2x00_abort_command
 *	Abort command aborts a specified IOCB.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sp = SB structure pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_abort_command(srb_t *sp)
{
832 unsigned long flags = 0;
833 int rval;
834 uint32_t handle = 0;
835 mbx_cmd_t mc;
836 mbx_cmd_t *mcp = &mc;
837 fc_port_t *fcport = sp->fcport;
838 scsi_qla_host_t *vha = fcport->vha;
839 struct qla_hw_data *ha = vha->hw;
840 struct req_que *req = vha->req;
842 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
844 spin_lock_irqsave(&ha->hardware_lock, flags);
845 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
846 if (req->outstanding_cmds[handle] == sp)
847 break;
849 spin_unlock_irqrestore(&ha->hardware_lock, flags);
851 if (handle == MAX_OUTSTANDING_COMMANDS) {
852 /* command not found */
853 return QLA_FUNCTION_FAILED;
856 mcp->mb[0] = MBC_ABORT_COMMAND;
857 if (HAS_EXTENDED_IDS(ha))
858 mcp->mb[1] = fcport->loop_id;
859 else
860 mcp->mb[1] = fcport->loop_id << 8;
861 mcp->mb[2] = (uint16_t)handle;
862 mcp->mb[3] = (uint16_t)(handle >> 16);
863 mcp->mb[6] = (uint16_t)sp->cmd->device->lun;
864 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
865 mcp->in_mb = MBX_0;
866 mcp->tov = MBX_TOV_SECONDS;
867 mcp->flags = 0;
868 rval = qla2x00_mailbox_command(vha, mcp);
870 if (rval != QLA_SUCCESS) {
871 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
872 } else {
873 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
876 return rval;
}

int
qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
{
882 int rval, rval2;
883 mbx_cmd_t mc;
884 mbx_cmd_t *mcp = &mc;
885 scsi_qla_host_t *vha;
886 struct req_que *req;
887 struct rsp_que *rsp;
889 l = l;
890 vha = fcport->vha;
892 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
894 req = vha->hw->req_q_map[0];
895 rsp = req->rsp;
896 mcp->mb[0] = MBC_ABORT_TARGET;
897 mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
898 if (HAS_EXTENDED_IDS(vha->hw)) {
899 mcp->mb[1] = fcport->loop_id;
900 mcp->mb[10] = 0;
901 mcp->out_mb |= MBX_10;
902 } else {
903 mcp->mb[1] = fcport->loop_id << 8;
905 mcp->mb[2] = vha->hw->loop_reset_delay;
906 mcp->mb[9] = vha->vp_idx;
908 mcp->in_mb = MBX_0;
909 mcp->tov = MBX_TOV_SECONDS;
910 mcp->flags = 0;
911 rval = qla2x00_mailbox_command(vha, mcp);
912 if (rval != QLA_SUCCESS) {
913 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
916 /* Issue marker IOCB. */
917 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, 0,
918 MK_SYNC_ID);
919 if (rval2 != QLA_SUCCESS) {
920 ql_dbg(ql_dbg_mbx, vha, 0x1040,
921 "Failed to issue marker IOCB (%x).\n", rval2);
922 } else {
923 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
926 return rval;
}

int
qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
{
932 int rval, rval2;
933 mbx_cmd_t mc;
934 mbx_cmd_t *mcp = &mc;
935 scsi_qla_host_t *vha;
936 struct req_que *req;
937 struct rsp_que *rsp;
939 vha = fcport->vha;
941 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
943 req = vha->hw->req_q_map[0];
944 rsp = req->rsp;
945 mcp->mb[0] = MBC_LUN_RESET;
946 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
947 if (HAS_EXTENDED_IDS(vha->hw))
948 mcp->mb[1] = fcport->loop_id;
949 else
950 mcp->mb[1] = fcport->loop_id << 8;
951 mcp->mb[2] = l;
952 mcp->mb[3] = 0;
953 mcp->mb[9] = vha->vp_idx;
955 mcp->in_mb = MBX_0;
956 mcp->tov = MBX_TOV_SECONDS;
957 mcp->flags = 0;
958 rval = qla2x00_mailbox_command(vha, mcp);
959 if (rval != QLA_SUCCESS) {
960 ql_dbg(ql_dbg_mbx, vha, 0x1043, "Failed=%x.\n", rval);
963 /* Issue marker IOCB. */
964 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
965 MK_SYNC_ID_LUN);
966 if (rval2 != QLA_SUCCESS) {
967 ql_dbg(ql_dbg_mbx, vha, 0x1044,
968 "Failed to issue marker IOCB (%x).\n", rval2);
969 } else {
970 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
973 return rval;
}

/*
 * qla2x00_get_adapter_id
 *	Get adapter ID and topology.
 *
 * Input:
 *	ha = adapter block pointer.
 *	id = pointer for loop ID.
 *	al_pa = pointer for AL_PA.
 *	area = pointer for area.
 *	domain = pointer for domain.
 *	top = pointer for topology.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
    uint8_t *area, uint8_t *domain, uint16_t *top, uint16_t *sw_cap)
{
1000 int rval;
1001 mbx_cmd_t mc;
1002 mbx_cmd_t *mcp = &mc;
1004 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
1006 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1007 mcp->mb[9] = vha->vp_idx;
1008 mcp->out_mb = MBX_9|MBX_0;
1009 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1010 if (IS_QLA8XXX_TYPE(vha->hw))
1011 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1012 mcp->tov = MBX_TOV_SECONDS;
1013 mcp->flags = 0;
1014 rval = qla2x00_mailbox_command(vha, mcp);
1015 if (mcp->mb[0] == MBS_COMMAND_ERROR)
1016 rval = QLA_COMMAND_ERROR;
1017 else if (mcp->mb[0] == MBS_INVALID_COMMAND)
1018 rval = QLA_INVALID_COMMAND;
1020 /* Return data. */
1021 *id = mcp->mb[1];
1022 *al_pa = LSB(mcp->mb[2]);
1023 *area = MSB(mcp->mb[2]);
1024 *domain = LSB(mcp->mb[3]);
1025 *top = mcp->mb[6];
1026 *sw_cap = mcp->mb[7];
1028 if (rval != QLA_SUCCESS) {
1029 /*EMPTY*/
1030 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1031 } else {
1032 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1034 if (IS_QLA8XXX_TYPE(vha->hw)) {
1035 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1036 vha->fcoe_fcf_idx = mcp->mb[10];
1037 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
1038 vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
1039 vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
1040 vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
1041 vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
1042 vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
		}
	}

	return rval;
}
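
/*
 * Worked example of the decode above (hypothetical values): with
 * mb[2] = 0x1a02 and mb[3] = 0x00ef the firmware is reporting
 *
 *	*al_pa  = LSB(mcp->mb[2]);	// 0x02
 *	*area   = MSB(mcp->mb[2]);	// 0x1a
 *	*domain = LSB(mcp->mb[3]);	// 0xef
 *
 * i.e. the 24-bit port ID ef-1a-02, with the loop ID in mb[1] and the
 * topology code in mb[6].
 */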

/*
 * qla2x00_get_retry_cnt
 *	Get current firmware login retry count and delay.
 *
 * Input:
 *	ha = adapter block pointer.
 *	retry_cnt = pointer to login retry count.
 *	tov = pointer to login timeout value.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
    uint16_t *r_a_tov)
{
1068 int rval;
1069 uint16_t ratov;
1070 mbx_cmd_t mc;
1071 mbx_cmd_t *mcp = &mc;
1073 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
1075 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1076 mcp->out_mb = MBX_0;
1077 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1078 mcp->tov = MBX_TOV_SECONDS;
1079 mcp->flags = 0;
1080 rval = qla2x00_mailbox_command(vha, mcp);
1082 if (rval != QLA_SUCCESS) {
1083 /*EMPTY*/
1084 ql_dbg(ql_dbg_mbx, vha, 0x104a,
1085 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1086 } else {
1087 /* Convert returned data and check our values. */
1088 *r_a_tov = mcp->mb[3] / 2;
1089 ratov = (mcp->mb[3]/2) / 10; /* mb[3] value is in 100ms */
1090 if (mcp->mb[1] * ratov > (*retry_cnt) * (*tov)) {
1091 /* Update to the larger values */
1092 *retry_cnt = (uint8_t)mcp->mb[1];
1093 *tov = ratov;
		}

		ql_dbg(ql_dbg_mbx, vha, 0x104b,
		    "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
	}

	return rval;
}
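
/*
 * Worked example of the conversion above (hypothetical values): mb[3] is
 * reported in 100 ms units, so mb[3] = 200 corresponds to 20 seconds:
 *
 *	*r_a_tov = 200 / 2;		// 100, still in 100 ms units
 *	ratov    = (200 / 2) / 10;	// 10 seconds
 *
 * The caller's retry_cnt/tov pair is only updated when mb[1] * ratov
 * exceeds the (*retry_cnt) * (*tov) product it already holds.
 */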

/*
 * qla2x00_init_firmware
 *	Initialize adapter firmware.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = Initialization control block pointer.
 *	size = size of initialization control block.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
{
1123 int rval;
1124 mbx_cmd_t mc;
1125 mbx_cmd_t *mcp = &mc;
1126 struct qla_hw_data *ha = vha->hw;
1128 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
1130 if (IS_QLA82XX(ha) && ql2xdbwr)
1131 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
1132 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1134 if (ha->flags.npiv_supported)
1135 mcp->mb[0] = MBC_MID_INITIALIZE_FIRMWARE;
1136 else
1137 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
1139 mcp->mb[1] = 0;
1140 mcp->mb[2] = MSW(ha->init_cb_dma);
1141 mcp->mb[3] = LSW(ha->init_cb_dma);
1142 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1143 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1144 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1145 if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) {
1146 mcp->mb[1] = BIT_0;
1147 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1148 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1149 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1150 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1151 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1152 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1154 mcp->in_mb = MBX_0;
1155 mcp->buf_size = size;
1156 mcp->flags = MBX_DMA_OUT;
1157 mcp->tov = MBX_TOV_SECONDS;
1158 rval = qla2x00_mailbox_command(vha, mcp);
1160 if (rval != QLA_SUCCESS) {
1161 /*EMPTY*/
1162 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1163 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
1164 } else {
1165 /*EMPTY*/
1166 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
1169 return rval;
}

/*
 * qla2x00_get_port_database
 *	Issue normal/enhanced get port database mailbox command
 *	and copy device name as necessary.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dev = structure pointer.
 *	opt = enhanced cmd option byte.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
{
1191 int rval;
1192 mbx_cmd_t mc;
1193 mbx_cmd_t *mcp = &mc;
1194 port_database_t *pd;
1195 struct port_database_24xx *pd24;
1196 dma_addr_t pd_dma;
1197 struct qla_hw_data *ha = vha->hw;
1199 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
1201 pd24 = NULL;
1202 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1203 if (pd == NULL) {
1204 ql_log(ql_log_warn, vha, 0x1050,
1205 "Failed to allocate port database structure.\n");
1206 return QLA_MEMORY_ALLOC_FAILED;
1208 memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
1210 mcp->mb[0] = MBC_GET_PORT_DATABASE;
1211 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1212 mcp->mb[0] = MBC_ENHANCED_GET_PORT_DATABASE;
1213 mcp->mb[2] = MSW(pd_dma);
1214 mcp->mb[3] = LSW(pd_dma);
1215 mcp->mb[6] = MSW(MSD(pd_dma));
1216 mcp->mb[7] = LSW(MSD(pd_dma));
1217 mcp->mb[9] = vha->vp_idx;
1218 mcp->out_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
1219 mcp->in_mb = MBX_0;
1220 if (IS_FWI2_CAPABLE(ha)) {
1221 mcp->mb[1] = fcport->loop_id;
1222 mcp->mb[10] = opt;
1223 mcp->out_mb |= MBX_10|MBX_1;
1224 mcp->in_mb |= MBX_1;
1225 } else if (HAS_EXTENDED_IDS(ha)) {
1226 mcp->mb[1] = fcport->loop_id;
1227 mcp->mb[10] = opt;
1228 mcp->out_mb |= MBX_10|MBX_1;
1229 } else {
1230 mcp->mb[1] = fcport->loop_id << 8 | opt;
1231 mcp->out_mb |= MBX_1;
1233 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
1234 PORT_DATABASE_24XX_SIZE : PORT_DATABASE_SIZE;
1235 mcp->flags = MBX_DMA_IN;
1236 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1237 rval = qla2x00_mailbox_command(vha, mcp);
1238 if (rval != QLA_SUCCESS)
1239 goto gpd_error_out;
1241 if (IS_FWI2_CAPABLE(ha)) {
1242 pd24 = (struct port_database_24xx *) pd;
1244 /* Check for logged in state. */
1245 if (pd24->current_login_state != PDS_PRLI_COMPLETE &&
1246 pd24->last_login_state != PDS_PRLI_COMPLETE) {
1247 ql_dbg(ql_dbg_mbx, vha, 0x1051,
1248 "Unable to verify login-state (%x/%x) for "
1249 "loop_id %x.\n", pd24->current_login_state,
1250 pd24->last_login_state, fcport->loop_id);
1251 rval = QLA_FUNCTION_FAILED;
1252 goto gpd_error_out;
1255 /* Names are little-endian. */
1256 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1257 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
1259 /* Get port_id of device. */
1260 fcport->d_id.b.domain = pd24->port_id[0];
1261 fcport->d_id.b.area = pd24->port_id[1];
1262 fcport->d_id.b.al_pa = pd24->port_id[2];
1263 fcport->d_id.b.rsvd_1 = 0;
1265 /* If not target must be initiator or unknown type. */
1266 if ((pd24->prli_svc_param_word_3[0] & BIT_4) == 0)
1267 fcport->port_type = FCT_INITIATOR;
1268 else
1269 fcport->port_type = FCT_TARGET;
1270 } else {
1271 /* Check for logged in state. */
1272 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1273 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
1274 ql_dbg(ql_dbg_mbx, vha, 0x100a,
1275 "Unable to verify login-state (%x/%x) - "
1276 "portid=%02x%02x%02x.\n", pd->master_state,
1277 pd->slave_state, fcport->d_id.b.domain,
1278 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1279 rval = QLA_FUNCTION_FAILED;
1280 goto gpd_error_out;
1283 /* Names are little-endian. */
1284 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1285 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
1287 /* Get port_id of device. */
1288 fcport->d_id.b.domain = pd->port_id[0];
1289 fcport->d_id.b.area = pd->port_id[3];
1290 fcport->d_id.b.al_pa = pd->port_id[2];
1291 fcport->d_id.b.rsvd_1 = 0;
1293 /* If not target must be initiator or unknown type. */
1294 if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
1295 fcport->port_type = FCT_INITIATOR;
1296 else
1297 fcport->port_type = FCT_TARGET;
1299 /* Passback COS information. */
1300 fcport->supported_classes = (pd->options & BIT_4) ?
1301 FC_COS_CLASS2: FC_COS_CLASS3;
1304 gpd_error_out:
1305 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
1307 if (rval != QLA_SUCCESS) {
1308 ql_dbg(ql_dbg_mbx, vha, 0x1052,
1309 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1310 mcp->mb[0], mcp->mb[1]);
1311 } else {
1312 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
1315 return rval;
}

/*
 * qla2x00_get_firmware_state
 *	Get adapter firmware state.
 *
 * Input:
 *	ha = adapter block pointer.
 *	dptr = pointer for firmware state.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
{
1337 int rval;
1338 mbx_cmd_t mc;
1339 mbx_cmd_t *mcp = &mc;
1341 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
1343 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1344 mcp->out_mb = MBX_0;
1345 if (IS_FWI2_CAPABLE(vha->hw))
1346 mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
1347 else
1348 mcp->in_mb = MBX_1|MBX_0;
1349 mcp->tov = MBX_TOV_SECONDS;
1350 mcp->flags = 0;
1351 rval = qla2x00_mailbox_command(vha, mcp);
1353 /* Return firmware states. */
1354 states[0] = mcp->mb[1];
1355 if (IS_FWI2_CAPABLE(vha->hw)) {
1356 states[1] = mcp->mb[2];
1357 states[2] = mcp->mb[3];
1358 states[3] = mcp->mb[4];
1359 states[4] = mcp->mb[5];
1362 if (rval != QLA_SUCCESS) {
1363 /*EMPTY*/
1364 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1365 } else {
1366 /*EMPTY*/
1367 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
1370 return rval;
}

/*
 * qla2x00_get_port_name
 *	Issue get port name mailbox command.
 *	Returned name is in big endian format.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = loop ID of device.
 *	name = pointer for name.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
    uint8_t opt)
{
1395 int rval;
1396 mbx_cmd_t mc;
1397 mbx_cmd_t *mcp = &mc;
1399 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
1401 mcp->mb[0] = MBC_GET_PORT_NAME;
1402 mcp->mb[9] = vha->vp_idx;
1403 mcp->out_mb = MBX_9|MBX_1|MBX_0;
1404 if (HAS_EXTENDED_IDS(vha->hw)) {
1405 mcp->mb[1] = loop_id;
1406 mcp->mb[10] = opt;
1407 mcp->out_mb |= MBX_10;
1408 } else {
1409 mcp->mb[1] = loop_id << 8 | opt;
1412 mcp->in_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1413 mcp->tov = MBX_TOV_SECONDS;
1414 mcp->flags = 0;
1415 rval = qla2x00_mailbox_command(vha, mcp);
1417 if (rval != QLA_SUCCESS) {
1418 /*EMPTY*/
1419 ql_dbg(ql_dbg_mbx, vha, 0x1058, "Failed=%x.\n", rval);
1420 } else {
1421 if (name != NULL) {
1422 /* This function returns name in big endian. */
1423 name[0] = MSB(mcp->mb[2]);
1424 name[1] = LSB(mcp->mb[2]);
1425 name[2] = MSB(mcp->mb[3]);
1426 name[3] = LSB(mcp->mb[3]);
1427 name[4] = MSB(mcp->mb[6]);
1428 name[5] = LSB(mcp->mb[6]);
1429 name[6] = MSB(mcp->mb[7]);
1430 name[7] = LSB(mcp->mb[7]);
		}

		ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
	}

	return rval;
}
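
/*
 * Sketch of the byte ordering above (hypothetical values): the port name
 * comes back big endian, two bytes per mailbox, so mb[2] = 0x2100,
 * mb[3] = 0x0024, mb[6] = 0xff01 and mb[7] = 0x0203 are unpacked with
 * MSB()/LSB() into
 *
 *	name[] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 }
 *
 * i.e. the WWPN 21:00:00:24:ff:01:02:03.
 */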

/*
 * qla2x00_lip_reset
 *	Issue LIP reset mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_lip_reset(scsi_qla_host_t *vha)
{
1457 int rval;
1458 mbx_cmd_t mc;
1459 mbx_cmd_t *mcp = &mc;
1461 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1463 if (IS_QLA8XXX_TYPE(vha->hw)) {
1464 /* Logout across all FCFs. */
1465 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1466 mcp->mb[1] = BIT_1;
1467 mcp->mb[2] = 0;
1468 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1469 } else if (IS_FWI2_CAPABLE(vha->hw)) {
1470 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1471 mcp->mb[1] = BIT_6;
1472 mcp->mb[2] = 0;
1473 mcp->mb[3] = vha->hw->loop_reset_delay;
1474 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1475 } else {
1476 mcp->mb[0] = MBC_LIP_RESET;
1477 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1478 if (HAS_EXTENDED_IDS(vha->hw)) {
1479 mcp->mb[1] = 0x00ff;
1480 mcp->mb[10] = 0;
1481 mcp->out_mb |= MBX_10;
1482 } else {
1483 mcp->mb[1] = 0xff00;
1485 mcp->mb[2] = vha->hw->loop_reset_delay;
1486 mcp->mb[3] = 0;
1488 mcp->in_mb = MBX_0;
1489 mcp->tov = MBX_TOV_SECONDS;
1490 mcp->flags = 0;
1491 rval = qla2x00_mailbox_command(vha, mcp);
1493 if (rval != QLA_SUCCESS) {
1494 /*EMPTY*/
1495 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1496 } else {
1497 /*EMPTY*/
1498 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
1501 return rval;
}

/*
 * qla2x00_send_sns
 *	Send SNS command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	sns = pointer for command.
 *	cmd_size = command size.
 *	buf_size = response/command size.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
    uint16_t cmd_size, size_t buf_size)
{
1526 int rval;
1527 mbx_cmd_t mc;
1528 mbx_cmd_t *mcp = &mc;
1530 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
1532 ql_dbg(ql_dbg_mbx, vha, 0x105e,
1533 "Retry cnt=%d ratov=%d total tov=%d.\n",
1534 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1536 mcp->mb[0] = MBC_SEND_SNS_COMMAND;
1537 mcp->mb[1] = cmd_size;
1538 mcp->mb[2] = MSW(sns_phys_address);
1539 mcp->mb[3] = LSW(sns_phys_address);
1540 mcp->mb[6] = MSW(MSD(sns_phys_address));
1541 mcp->mb[7] = LSW(MSD(sns_phys_address));
1542 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1543 mcp->in_mb = MBX_0|MBX_1;
1544 mcp->buf_size = buf_size;
1545 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN;
1546 mcp->tov = (vha->hw->login_timeout * 2) + (vha->hw->login_timeout / 2);
1547 rval = qla2x00_mailbox_command(vha, mcp);
1549 if (rval != QLA_SUCCESS) {
1550 /*EMPTY*/
1551 ql_dbg(ql_dbg_mbx, vha, 0x105f,
1552 "Failed=%x mb[0]=%x mb[1]=%x.\n",
1553 rval, mcp->mb[0], mcp->mb[1]);
1554 } else {
1555 /*EMPTY*/
1556 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
1559 return rval;
}

int
qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
1566 int rval;
1568 struct logio_entry_24xx *lg;
1569 dma_addr_t lg_dma;
1570 uint32_t iop[2];
1571 struct qla_hw_data *ha = vha->hw;
1572 struct req_que *req;
1573 struct rsp_que *rsp;
1575 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
1577 if (ha->flags.cpu_affinity_enabled)
1578 req = ha->req_q_map[0];
1579 else
1580 req = vha->req;
1581 rsp = req->rsp;
1583 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1584 if (lg == NULL) {
1585 ql_log(ql_log_warn, vha, 0x1062,
1586 "Failed to allocate login IOCB.\n");
1587 return QLA_MEMORY_ALLOC_FAILED;
1589 memset(lg, 0, sizeof(struct logio_entry_24xx));
1591 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1592 lg->entry_count = 1;
1593 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1594 lg->nport_handle = cpu_to_le16(loop_id);
1595 lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
1596 if (opt & BIT_0)
1597 lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
1598 if (opt & BIT_1)
1599 lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
1600 lg->port_id[0] = al_pa;
1601 lg->port_id[1] = area;
1602 lg->port_id[2] = domain;
1603 lg->vp_index = vha->vp_idx;
1604 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1605 if (rval != QLA_SUCCESS) {
1606 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1607 "Failed to issue login IOCB (%x).\n", rval);
1608 } else if (lg->entry_status != 0) {
1609 ql_dbg(ql_dbg_mbx, vha, 0x1064,
1610 "Failed to complete IOCB -- error status (%x).\n",
1611 lg->entry_status);
1612 rval = QLA_FUNCTION_FAILED;
1613 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1614 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1615 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1617 ql_dbg(ql_dbg_mbx, vha, 0x1065,
1618 "Failed to complete IOCB -- completion status (%x) "
1619 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1620 iop[0], iop[1]);
1622 switch (iop[0]) {
1623 case LSC_SCODE_PORTID_USED:
1624 mb[0] = MBS_PORT_ID_USED;
1625 mb[1] = LSW(iop[1]);
1626 break;
1627 case LSC_SCODE_NPORT_USED:
1628 mb[0] = MBS_LOOP_ID_USED;
1629 break;
1630 case LSC_SCODE_NOLINK:
1631 case LSC_SCODE_NOIOCB:
1632 case LSC_SCODE_NOXCB:
1633 case LSC_SCODE_CMD_FAILED:
1634 case LSC_SCODE_NOFABRIC:
1635 case LSC_SCODE_FW_NOT_READY:
1636 case LSC_SCODE_NOT_LOGGED_IN:
1637 case LSC_SCODE_NOPCB:
1638 case LSC_SCODE_ELS_REJECT:
1639 case LSC_SCODE_CMD_PARAM_ERR:
1640 case LSC_SCODE_NONPORT:
1641 case LSC_SCODE_LOGGED_IN:
1642 case LSC_SCODE_NOFLOGI_ACC:
1643 default:
1644 mb[0] = MBS_COMMAND_ERROR;
1645 break;
1647 } else {
1648 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
1650 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1652 mb[0] = MBS_COMMAND_COMPLETE;
1653 mb[1] = 0;
1654 if (iop[0] & BIT_4) {
1655 if (iop[0] & BIT_8)
1656 mb[1] |= BIT_1;
1657 } else
1658 mb[1] = BIT_0;
1660 /* Passback COS information. */
1661 mb[10] = 0;
1662 if (lg->io_parameter[7] || lg->io_parameter[8])
1663 mb[10] |= BIT_0; /* Class 2. */
1664 if (lg->io_parameter[9] || lg->io_parameter[10])
1665 mb[10] |= BIT_1; /* Class 3. */
1668 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1670 return rval;
}

/*
 * qla2x00_login_fabric
 *	Issue login fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	domain = device domain.
 *	area = device area.
 *	al_pa = device AL_PA.
 *	status = pointer for return status.
 *	opt = command options.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa, uint16_t *mb, uint8_t opt)
{
1698 int rval;
1699 mbx_cmd_t mc;
1700 mbx_cmd_t *mcp = &mc;
1701 struct qla_hw_data *ha = vha->hw;
1703 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
1705 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1706 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1707 if (HAS_EXTENDED_IDS(ha)) {
1708 mcp->mb[1] = loop_id;
1709 mcp->mb[10] = opt;
1710 mcp->out_mb |= MBX_10;
1711 } else {
1712 mcp->mb[1] = (loop_id << 8) | opt;
1714 mcp->mb[2] = domain;
1715 mcp->mb[3] = area << 8 | al_pa;
1717 mcp->in_mb = MBX_7|MBX_6|MBX_2|MBX_1|MBX_0;
1718 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1719 mcp->flags = 0;
1720 rval = qla2x00_mailbox_command(vha, mcp);
1722 /* Return mailbox statuses. */
1723 if (mb != NULL) {
1724 mb[0] = mcp->mb[0];
1725 mb[1] = mcp->mb[1];
1726 mb[2] = mcp->mb[2];
1727 mb[6] = mcp->mb[6];
1728 mb[7] = mcp->mb[7];
1729 /* COS retrieved from Get-Port-Database mailbox command. */
1730 mb[10] = 0;
1733 if (rval != QLA_SUCCESS) {
1734 /* RLU tmp code: need to change main mailbox_command function to
1735 * return ok even when the mailbox completion value is not
1736 * SUCCESS. The caller needs to be responsible to interpret
1737 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
1740 if (mcp->mb[0] == 0x4001 || mcp->mb[0] == 0x4002 ||
1741 mcp->mb[0] == 0x4003 || mcp->mb[0] == 0x4005 ||
1742 mcp->mb[0] == 0x4006)
1743 rval = QLA_SUCCESS;
1745 /*EMPTY*/
1746 ql_dbg(ql_dbg_mbx, vha, 0x1068,
1747 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
1748 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1749 } else {
1750 /*EMPTY*/
1751 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
1754 return rval;
}

/*
 * qla2x00_login_local_device
 *	Issue login loop port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	opt = command options.
 *
 * Returns:
 *	Return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
    uint16_t *mb_ret, uint8_t opt)
{
1777 int rval;
1778 mbx_cmd_t mc;
1779 mbx_cmd_t *mcp = &mc;
1780 struct qla_hw_data *ha = vha->hw;
1782 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
1784 if (IS_FWI2_CAPABLE(ha))
1785 return qla24xx_login_fabric(vha, fcport->loop_id,
1786 fcport->d_id.b.domain, fcport->d_id.b.area,
1787 fcport->d_id.b.al_pa, mb_ret, opt);
1789 mcp->mb[0] = MBC_LOGIN_LOOP_PORT;
1790 if (HAS_EXTENDED_IDS(ha))
1791 mcp->mb[1] = fcport->loop_id;
1792 else
1793 mcp->mb[1] = fcport->loop_id << 8;
1794 mcp->mb[2] = opt;
1795 mcp->out_mb = MBX_2|MBX_1|MBX_0;
1796 mcp->in_mb = MBX_7|MBX_6|MBX_1|MBX_0;
1797 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
1798 mcp->flags = 0;
1799 rval = qla2x00_mailbox_command(vha, mcp);
1801 /* Return mailbox statuses. */
1802 if (mb_ret != NULL) {
1803 mb_ret[0] = mcp->mb[0];
1804 mb_ret[1] = mcp->mb[1];
1805 mb_ret[6] = mcp->mb[6];
1806 mb_ret[7] = mcp->mb[7];
1809 if (rval != QLA_SUCCESS) {
1810 /* AV tmp code: need to change main mailbox_command function to
1811 * return ok even when the mailbox completion value is not
1812 * SUCCESS. The caller needs to be responsible to interpret
1813 * the return values of this mailbox command if we're not
		 * to change too much of the existing code.
		 */
1816 if (mcp->mb[0] == 0x4005 || mcp->mb[0] == 0x4006)
1817 rval = QLA_SUCCESS;
1819 ql_dbg(ql_dbg_mbx, vha, 0x106b,
1820 "Failed=%x mb[0]=%x mb[1]=%x mb[6]=%x mb[7]=%x.\n",
1821 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1822 } else {
1823 /*EMPTY*/
1824 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
1827 return (rval);
}

int
qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
1834 int rval;
1835 struct logio_entry_24xx *lg;
1836 dma_addr_t lg_dma;
1837 struct qla_hw_data *ha = vha->hw;
1838 struct req_que *req;
1839 struct rsp_que *rsp;
1841 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
1843 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1844 if (lg == NULL) {
1845 ql_log(ql_log_warn, vha, 0x106e,
1846 "Failed to allocate logout IOCB.\n");
1847 return QLA_MEMORY_ALLOC_FAILED;
1849 memset(lg, 0, sizeof(struct logio_entry_24xx));
1851 if (ql2xmaxqueues > 1)
1852 req = ha->req_q_map[0];
1853 else
1854 req = vha->req;
1855 rsp = req->rsp;
1856 lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1857 lg->entry_count = 1;
1858 lg->handle = MAKE_HANDLE(req->id, lg->handle);
1859 lg->nport_handle = cpu_to_le16(loop_id);
1860 lg->control_flags =
1861 __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
1862 LCF_FREE_NPORT);
1863 lg->port_id[0] = al_pa;
1864 lg->port_id[1] = area;
1865 lg->port_id[2] = domain;
1866 lg->vp_index = vha->vp_idx;
1868 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0);
1869 if (rval != QLA_SUCCESS) {
1870 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1871 "Failed to issue logout IOCB (%x).\n", rval);
1872 } else if (lg->entry_status != 0) {
1873 ql_dbg(ql_dbg_mbx, vha, 0x1070,
1874 "Failed to complete IOCB -- error status (%x).\n",
1875 lg->entry_status);
1876 rval = QLA_FUNCTION_FAILED;
1877 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1878 ql_dbg(ql_dbg_mbx, vha, 0x1071,
1879 "Failed to complete IOCB -- completion status (%x) "
1880 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
1881 le32_to_cpu(lg->io_parameter[0]),
1882 le32_to_cpu(lg->io_parameter[1]));
1883 } else {
1884 /*EMPTY*/
1885 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
1888 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
1890 return rval;
}

/*
 * qla2x00_fabric_logout
 *	Issue logout fabric port mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	loop_id = device loop ID.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
    uint8_t area, uint8_t al_pa)
{
1913 int rval;
1914 mbx_cmd_t mc;
1915 mbx_cmd_t *mcp = &mc;
1917 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
1919 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1920 mcp->out_mb = MBX_1|MBX_0;
1921 if (HAS_EXTENDED_IDS(vha->hw)) {
1922 mcp->mb[1] = loop_id;
1923 mcp->mb[10] = 0;
1924 mcp->out_mb |= MBX_10;
1925 } else {
1926 mcp->mb[1] = loop_id << 8;
1929 mcp->in_mb = MBX_1|MBX_0;
1930 mcp->tov = MBX_TOV_SECONDS;
1931 mcp->flags = 0;
1932 rval = qla2x00_mailbox_command(vha, mcp);
1934 if (rval != QLA_SUCCESS) {
1935 /*EMPTY*/
1936 ql_dbg(ql_dbg_mbx, vha, 0x1074,
1937 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
1938 } else {
1939 /*EMPTY*/
1940 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
1943 return rval;
}

/*
 * qla2x00_full_login_lip
 *	Issue full login LIP mailbox command.
 *
 * Input:
 *	ha = adapter block pointer.
 *	TARGET_QUEUE_LOCK must be released.
 *	ADAPTER_STATE_LOCK must be released.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_full_login_lip(scsi_qla_host_t *vha)
{
1964 int rval;
1965 mbx_cmd_t mc;
1966 mbx_cmd_t *mcp = &mc;
1968 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
1970 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1971 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
1972 mcp->mb[2] = 0;
1973 mcp->mb[3] = 0;
1974 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
1975 mcp->in_mb = MBX_0;
1976 mcp->tov = MBX_TOV_SECONDS;
1977 mcp->flags = 0;
1978 rval = qla2x00_mailbox_command(vha, mcp);
1980 if (rval != QLA_SUCCESS) {
1981 /*EMPTY*/
1982 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
1983 } else {
1984 /*EMPTY*/
1985 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
1988 return rval;
}

/*
 * qla2x00_get_id_list
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
    uint16_t *entries)
{
2007 int rval;
2008 mbx_cmd_t mc;
2009 mbx_cmd_t *mcp = &mc;
2011 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
2013 if (id_list == NULL)
2014 return QLA_FUNCTION_FAILED;
2016 mcp->mb[0] = MBC_GET_ID_LIST;
2017 mcp->out_mb = MBX_0;
2018 if (IS_FWI2_CAPABLE(vha->hw)) {
2019 mcp->mb[2] = MSW(id_list_dma);
2020 mcp->mb[3] = LSW(id_list_dma);
2021 mcp->mb[6] = MSW(MSD(id_list_dma));
2022 mcp->mb[7] = LSW(MSD(id_list_dma));
2023 mcp->mb[8] = 0;
2024 mcp->mb[9] = vha->vp_idx;
2025 mcp->out_mb |= MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2;
2026 } else {
2027 mcp->mb[1] = MSW(id_list_dma);
2028 mcp->mb[2] = LSW(id_list_dma);
2029 mcp->mb[3] = MSW(MSD(id_list_dma));
2030 mcp->mb[6] = LSW(MSD(id_list_dma));
2031 mcp->out_mb |= MBX_6|MBX_3|MBX_2|MBX_1;
2033 mcp->in_mb = MBX_1|MBX_0;
2034 mcp->tov = MBX_TOV_SECONDS;
2035 mcp->flags = 0;
2036 rval = qla2x00_mailbox_command(vha, mcp);
2038 if (rval != QLA_SUCCESS) {
2039 /*EMPTY*/
2040 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2041 } else {
2042 *entries = mcp->mb[1];
2043 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
2046 return rval;
}

/*
 * qla2x00_get_resource_cnts
 *	Get current firmware resource counts.
 *
 * Input:
 *	ha = adapter block pointer.
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
    uint16_t *orig_xchg_cnt, uint16_t *cur_iocb_cnt,
    uint16_t *orig_iocb_cnt, uint16_t *max_npiv_vports, uint16_t *max_fcfs)
{
2067 int rval;
2068 mbx_cmd_t mc;
2069 mbx_cmd_t *mcp = &mc;
2071 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
2073 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2074 mcp->out_mb = MBX_0;
2075 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2076 if (IS_QLA81XX(vha->hw))
2077 mcp->in_mb |= MBX_12;
2078 mcp->tov = MBX_TOV_SECONDS;
2079 mcp->flags = 0;
2080 rval = qla2x00_mailbox_command(vha, mcp);
2082 if (rval != QLA_SUCCESS) {
2083 /*EMPTY*/
2084 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2085 "Failed mb[0]=%x.\n", mcp->mb[0]);
2086 } else {
2087 ql_dbg(ql_dbg_mbx, vha, 0x107e,
2088 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2089 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2090 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
2091 mcp->mb[11], mcp->mb[12]);
2093 if (cur_xchg_cnt)
2094 *cur_xchg_cnt = mcp->mb[3];
2095 if (orig_xchg_cnt)
2096 *orig_xchg_cnt = mcp->mb[6];
2097 if (cur_iocb_cnt)
2098 *cur_iocb_cnt = mcp->mb[7];
2099 if (orig_iocb_cnt)
2100 *orig_iocb_cnt = mcp->mb[10];
2101 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2102 *max_npiv_vports = mcp->mb[11];
2103 if (IS_QLA81XX(vha->hw) && max_fcfs)
2104 *max_fcfs = mcp->mb[12];
2107 return (rval);
}

/*
 * qla2x00_get_fcal_position_map
 *	Get FCAL (LILP) position map using mailbox command
 *
 * Input:
 *	ha = adapter state pointer.
 *	pos_map = buffer pointer (can be NULL).
 *
 * Returns:
 *	qla2x00 local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
{
2127 int rval;
2128 mbx_cmd_t mc;
2129 mbx_cmd_t *mcp = &mc;
2130 char *pmap;
2131 dma_addr_t pmap_dma;
2132 struct qla_hw_data *ha = vha->hw;
2134 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
2136 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2137 if (pmap == NULL) {
2138 ql_log(ql_log_warn, vha, 0x1080,
2139 "Memory alloc failed.\n");
2140 return QLA_MEMORY_ALLOC_FAILED;
2142 memset(pmap, 0, FCAL_MAP_SIZE);
2144 mcp->mb[0] = MBC_GET_FC_AL_POSITION_MAP;
2145 mcp->mb[2] = MSW(pmap_dma);
2146 mcp->mb[3] = LSW(pmap_dma);
2147 mcp->mb[6] = MSW(MSD(pmap_dma));
2148 mcp->mb[7] = LSW(MSD(pmap_dma));
2149 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2150 mcp->in_mb = MBX_1|MBX_0;
2151 mcp->buf_size = FCAL_MAP_SIZE;
2152 mcp->flags = MBX_DMA_IN;
2153 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2154 rval = qla2x00_mailbox_command(vha, mcp);
2156 if (rval == QLA_SUCCESS) {
2157 ql_dbg(ql_dbg_mbx, vha, 0x1081,
2158 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2159 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2160 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
2161 pmap, pmap[0] + 1);
2163 if (pos_map)
2164 memcpy(pos_map, pmap, FCAL_MAP_SIZE);
2166 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
2168 if (rval != QLA_SUCCESS) {
2169 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2170 } else {
2171 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
2174 return rval;
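/*
 * Note on the map layout, as used by the dump above: the first byte of
 * the returned LILP map holds the length of the map (the number of port
 * entries that follow), so the interesting data occupies pmap[0] + 1
 * bytes.  A hypothetical caller (sketch only) must supply at least
 * FCAL_MAP_SIZE bytes:
 *
 *	char map[FCAL_MAP_SIZE];
 *
 *	if (qla2x00_get_fcal_position_map(vha, map) == QLA_SUCCESS)
 *		... map[0] entries follow in map[1], map[2], ... ...
 */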
2178 * qla2x00_get_link_status
2180 * Input:
2181 * ha = adapter block pointer.
2182 * loop_id = device loop ID.
2183 * ret_buf = pointer to link status return buffer.
2185 * Returns:
2186 * 0 = success.
2187 * BIT_0 = mem alloc error.
2188 * BIT_1 = mailbox error.
2191 qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2192 struct link_statistics *stats, dma_addr_t stats_dma)
2194 int rval;
2195 mbx_cmd_t mc;
2196 mbx_cmd_t *mcp = &mc;
2197 uint32_t *siter, *diter, dwords;
2198 struct qla_hw_data *ha = vha->hw;
2200 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
2202 mcp->mb[0] = MBC_GET_LINK_STATUS;
2203 mcp->mb[2] = MSW(stats_dma);
2204 mcp->mb[3] = LSW(stats_dma);
2205 mcp->mb[6] = MSW(MSD(stats_dma));
2206 mcp->mb[7] = LSW(MSD(stats_dma));
2207 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2208 mcp->in_mb = MBX_0;
2209 if (IS_FWI2_CAPABLE(ha)) {
2210 mcp->mb[1] = loop_id;
2211 mcp->mb[4] = 0;
2212 mcp->mb[10] = 0;
2213 mcp->out_mb |= MBX_10|MBX_4|MBX_1;
2214 mcp->in_mb |= MBX_1;
2215 } else if (HAS_EXTENDED_IDS(ha)) {
2216 mcp->mb[1] = loop_id;
2217 mcp->mb[10] = 0;
2218 mcp->out_mb |= MBX_10|MBX_1;
2219 } else {
2220 mcp->mb[1] = loop_id << 8;
2221 mcp->out_mb |= MBX_1;
2223 mcp->tov = MBX_TOV_SECONDS;
2224 mcp->flags = IOCTL_CMD;
2225 rval = qla2x00_mailbox_command(vha, mcp);
2227 if (rval == QLA_SUCCESS) {
2228 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2229 ql_dbg(ql_dbg_mbx, vha, 0x1085,
2230 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2231 rval = QLA_FUNCTION_FAILED;
2232 } else {
2233 /* Copy over data -- firmware data is LE. */
2234 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
2235 dwords = offsetof(struct link_statistics, unused1) / 4;
2236 siter = diter = &stats->link_fail_cnt;
2237 while (dwords--)
2238 *diter++ = le32_to_cpu(*siter++);
2240 } else {
2241 /* Failed. */
2242 ql_dbg(ql_dbg_mbx, vha, 0x1087, "Failed=%x.\n", rval);
2245 return rval;
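/*
 * The conversion loop above swaps each 32-bit counter from firmware
 * (little-endian) to CPU order in place; on little-endian hosts
 * le32_to_cpu() is a no-op, so the loop simply copies every dword onto
 * itself.  Only the counters up to the 'unused1' member are converted.
 */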
2249 qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2250 dma_addr_t stats_dma)
2252 int rval;
2253 mbx_cmd_t mc;
2254 mbx_cmd_t *mcp = &mc;
2255 uint32_t *siter, *diter, dwords;
2257 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
2259 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2260 mcp->mb[2] = MSW(stats_dma);
2261 mcp->mb[3] = LSW(stats_dma);
2262 mcp->mb[6] = MSW(MSD(stats_dma));
2263 mcp->mb[7] = LSW(MSD(stats_dma));
2264 mcp->mb[8] = sizeof(struct link_statistics) / 4;
2265 mcp->mb[9] = vha->vp_idx;
2266 mcp->mb[10] = 0;
2267 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
2268 mcp->in_mb = MBX_2|MBX_1|MBX_0;
2269 mcp->tov = MBX_TOV_SECONDS;
2270 mcp->flags = IOCTL_CMD;
2271 rval = qla2x00_mailbox_command(vha, mcp);
2273 if (rval == QLA_SUCCESS) {
2274 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
2275 ql_dbg(ql_dbg_mbx, vha, 0x1089,
2276 "Failed mb[0]=%x.\n", mcp->mb[0]);
2277 rval = QLA_FUNCTION_FAILED;
2278 } else {
2279 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
2280 /* Copy over data -- firmware data is LE. */
2281 dwords = sizeof(struct link_statistics) / 4;
2282 siter = diter = &stats->link_fail_cnt;
2283 while (dwords--)
2284 *diter++ = le32_to_cpu(*siter++);
2286 } else {
2287 /* Failed. */
2288 ql_dbg(ql_dbg_mbx, vha, 0x108b, "Failed=%x.\n", rval);
2291 return rval;
2295 qla24xx_abort_command(srb_t *sp)
2297 int rval;
2298 unsigned long flags = 0;
2300 struct abort_entry_24xx *abt;
2301 dma_addr_t abt_dma;
2302 uint32_t handle;
2303 fc_port_t *fcport = sp->fcport;
2304 struct scsi_qla_host *vha = fcport->vha;
2305 struct qla_hw_data *ha = vha->hw;
2306 struct req_que *req = vha->req;
2308 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
2310 spin_lock_irqsave(&ha->hardware_lock, flags);
2311 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
2312 if (req->outstanding_cmds[handle] == sp)
2313 break;
2315 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2316 if (handle == MAX_OUTSTANDING_COMMANDS) {
2317 /* Command not found. */
2318 return QLA_FUNCTION_FAILED;
2321 abt = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
2322 if (abt == NULL) {
2323 ql_log(ql_log_warn, vha, 0x108d,
2324 "Failed to allocate abort IOCB.\n");
2325 return QLA_MEMORY_ALLOC_FAILED;
2327 memset(abt, 0, sizeof(struct abort_entry_24xx));
2329 abt->entry_type = ABORT_IOCB_TYPE;
2330 abt->entry_count = 1;
2331 abt->handle = MAKE_HANDLE(req->id, abt->handle);
2332 abt->nport_handle = cpu_to_le16(fcport->loop_id);
2333 abt->handle_to_abort = MAKE_HANDLE(req->id, handle);
2334 abt->port_id[0] = fcport->d_id.b.al_pa;
2335 abt->port_id[1] = fcport->d_id.b.area;
2336 abt->port_id[2] = fcport->d_id.b.domain;
2337 abt->vp_index = fcport->vp_idx;
2339 abt->req_que_no = cpu_to_le16(req->id);
2341 rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
2342 if (rval != QLA_SUCCESS) {
2343 ql_dbg(ql_dbg_mbx, vha, 0x108e,
2344 "Failed to issue IOCB (%x).\n", rval);
2345 } else if (abt->entry_status != 0) {
2346 ql_dbg(ql_dbg_mbx, vha, 0x108f,
2347 "Failed to complete IOCB -- error status (%x).\n",
2348 abt->entry_status);
2349 rval = QLA_FUNCTION_FAILED;
2350 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) {
2351 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2352 "Failed to complete IOCB -- completion status (%x).\n",
2353 le16_to_cpu(abt->nport_handle));
2354 rval = QLA_FUNCTION_FAILED;
2355 } else {
2356 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
2359 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
2361 return rval;
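/*
 * Summary of the abort path above: the outstanding command is located by
 * scanning req->outstanding_cmds[] under the hardware lock, an ABORT
 * IOCB addressed by (request queue id, handle) is then issued through
 * qla2x00_issue_iocb(), and success requires the nport_handle field of
 * the completed entry to come back as 0 (the field doubles as the
 * completion status).
 */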
2364 struct tsk_mgmt_cmd {
2365 union {
2366 struct tsk_mgmt_entry tsk;
2367 struct sts_entry_24xx sts;
2368 } p;
2371 static int
2372 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2373 unsigned int l, int tag)
2375 int rval, rval2;
2376 struct tsk_mgmt_cmd *tsk;
2377 struct sts_entry_24xx *sts;
2378 dma_addr_t tsk_dma;
2379 scsi_qla_host_t *vha;
2380 struct qla_hw_data *ha;
2381 struct req_que *req;
2382 struct rsp_que *rsp;
2384 vha = fcport->vha;
2385 ha = vha->hw;
2386 req = vha->req;
2388 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
2390 if (ha->flags.cpu_affinity_enabled)
2391 rsp = ha->rsp_q_map[tag + 1];
2392 else
2393 rsp = req->rsp;
2394 tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
2395 if (tsk == NULL) {
2396 ql_log(ql_log_warn, vha, 0x1093,
2397 "Failed to allocate task management IOCB.\n");
2398 return QLA_MEMORY_ALLOC_FAILED;
2400 memset(tsk, 0, sizeof(struct tsk_mgmt_cmd));
2402 tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
2403 tsk->p.tsk.entry_count = 1;
2404 tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
2405 tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
2406 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2407 tsk->p.tsk.control_flags = cpu_to_le32(type);
2408 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2409 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2410 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2411 tsk->p.tsk.vp_index = fcport->vp_idx;
2412 if (type == TCF_LUN_RESET) {
2413 int_to_scsilun(l, &tsk->p.tsk.lun);
2414 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
2415 sizeof(tsk->p.tsk.lun));
2418 sts = &tsk->p.sts;
2419 rval = qla2x00_issue_iocb(vha, tsk, tsk_dma, 0);
2420 if (rval != QLA_SUCCESS) {
2421 ql_dbg(ql_dbg_mbx, vha, 0x1094,
2422 "Failed to issue %s reset IOCB (%x).\n", name, rval);
2423 } else if (sts->entry_status != 0) {
2424 ql_dbg(ql_dbg_mbx, vha, 0x1095,
2425 "Failed to complete IOCB -- error status (%x).\n",
2426 sts->entry_status);
2427 rval = QLA_FUNCTION_FAILED;
2428 } else if (sts->comp_status !=
2429 __constant_cpu_to_le16(CS_COMPLETE)) {
2430 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2431 "Failed to complete IOCB -- completion status (%x).\n",
2432 le16_to_cpu(sts->comp_status));
2433 rval = QLA_FUNCTION_FAILED;
2434 } else if (le16_to_cpu(sts->scsi_status) &
2435 SS_RESPONSE_INFO_LEN_VALID) {
2436 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2437 ql_dbg(ql_dbg_mbx, vha, 0x1097,
2438 "Ignoring inconsistent data length -- not enough "
2439 "response info (%d).\n",
2440 le32_to_cpu(sts->rsp_data_len));
2441 } else if (sts->data[3]) {
2442 ql_dbg(ql_dbg_mbx, vha, 0x1098,
2443 "Failed to complete IOCB -- response (%x).\n",
2444 sts->data[3]);
2445 rval = QLA_FUNCTION_FAILED;
2449 /* Issue marker IOCB. */
2450 rval2 = qla2x00_marker(vha, req, rsp, fcport->loop_id, l,
2451 type == TCF_LUN_RESET ? MK_SYNC_ID_LUN: MK_SYNC_ID);
2452 if (rval2 != QLA_SUCCESS) {
2453 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2454 "Failed to issue marker IOCB (%x).\n", rval2);
2455 } else {
2456 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
2459 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
2461 return rval;
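/*
 * __qla24xx_issue_tmf() checks the completion in layers: the mailbox
 * issue status, then the IOCB entry_status, then comp_status, and
 * finally the FCP response code in sts->data[3] when the SCSI status
 * carries valid response info.  The qla24xx_abort_target() and
 * qla24xx_lun_reset() wrappers that follow show how it is used.
 */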
2465 qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
2467 struct qla_hw_data *ha = fcport->vha->hw;
2469 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2470 return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
2472 return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
2476 qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
2478 struct qla_hw_data *ha = fcport->vha->hw;
2480 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
2481 return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
2483 return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
2487 qla2x00_system_error(scsi_qla_host_t *vha)
2489 int rval;
2490 mbx_cmd_t mc;
2491 mbx_cmd_t *mcp = &mc;
2492 struct qla_hw_data *ha = vha->hw;
2494 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2495 return QLA_FUNCTION_FAILED;
2497 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
2499 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2500 mcp->out_mb = MBX_0;
2501 mcp->in_mb = MBX_0;
2502 mcp->tov = 5;
2503 mcp->flags = 0;
2504 rval = qla2x00_mailbox_command(vha, mcp);
2506 if (rval != QLA_SUCCESS) {
2507 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2508 } else {
2509 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
2512 return rval;
2516 * qla2x00_set_serdes_params() - Set serdes parameters.
2517 * @ha: HA context
2519 * Returns qla2x00 local function return status code.
2522 qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2523 uint16_t sw_em_2g, uint16_t sw_em_4g)
2525 int rval;
2526 mbx_cmd_t mc;
2527 mbx_cmd_t *mcp = &mc;
2529 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
2531 mcp->mb[0] = MBC_SERDES_PARAMS;
2532 mcp->mb[1] = BIT_0;
2533 mcp->mb[2] = sw_em_1g | BIT_15;
2534 mcp->mb[3] = sw_em_2g | BIT_15;
2535 mcp->mb[4] = sw_em_4g | BIT_15;
2536 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2537 mcp->in_mb = MBX_0;
2538 mcp->tov = MBX_TOV_SECONDS;
2539 mcp->flags = 0;
2540 rval = qla2x00_mailbox_command(vha, mcp);
2542 if (rval != QLA_SUCCESS) {
2543 /*EMPTY*/
2544 ql_dbg(ql_dbg_mbx, vha, 0x109f,
2545 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2546 } else {
2547 /*EMPTY*/
2548 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
2551 return rval;
2555 qla2x00_stop_firmware(scsi_qla_host_t *vha)
2557 int rval;
2558 mbx_cmd_t mc;
2559 mbx_cmd_t *mcp = &mc;
2561 if (!IS_FWI2_CAPABLE(vha->hw))
2562 return QLA_FUNCTION_FAILED;
2564 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
2566 mcp->mb[0] = MBC_STOP_FIRMWARE;
2567 mcp->out_mb = MBX_0;
2568 mcp->in_mb = MBX_0;
2569 mcp->tov = 5;
2570 mcp->flags = 0;
2571 rval = qla2x00_mailbox_command(vha, mcp);
2573 if (rval != QLA_SUCCESS) {
2574 ql_dbg(ql_dbg_mbx, vha, 0x10a2, "Failed=%x.\n", rval);
2575 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2576 rval = QLA_INVALID_COMMAND;
2577 } else {
2578 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
2581 return rval;
2585 qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2586 uint16_t buffers)
2588 int rval;
2589 mbx_cmd_t mc;
2590 mbx_cmd_t *mcp = &mc;
2592 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
2594 if (!IS_FWI2_CAPABLE(vha->hw))
2595 return QLA_FUNCTION_FAILED;
2597 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2598 return QLA_FUNCTION_FAILED;
2600 mcp->mb[0] = MBC_TRACE_CONTROL;
2601 mcp->mb[1] = TC_EFT_ENABLE;
2602 mcp->mb[2] = LSW(eft_dma);
2603 mcp->mb[3] = MSW(eft_dma);
2604 mcp->mb[4] = LSW(MSD(eft_dma));
2605 mcp->mb[5] = MSW(MSD(eft_dma));
2606 mcp->mb[6] = buffers;
2607 mcp->mb[7] = TC_AEN_DISABLE;
2608 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2609 mcp->in_mb = MBX_1|MBX_0;
2610 mcp->tov = MBX_TOV_SECONDS;
2611 mcp->flags = 0;
2612 rval = qla2x00_mailbox_command(vha, mcp);
2613 if (rval != QLA_SUCCESS) {
2614 ql_dbg(ql_dbg_mbx, vha, 0x10a5,
2615 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2616 rval, mcp->mb[0], mcp->mb[1]);
2617 } else {
2618 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
2621 return rval;
2625 qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2627 int rval;
2628 mbx_cmd_t mc;
2629 mbx_cmd_t *mcp = &mc;
2631 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
2633 if (!IS_FWI2_CAPABLE(vha->hw))
2634 return QLA_FUNCTION_FAILED;
2636 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2637 return QLA_FUNCTION_FAILED;
2639 mcp->mb[0] = MBC_TRACE_CONTROL;
2640 mcp->mb[1] = TC_EFT_DISABLE;
2641 mcp->out_mb = MBX_1|MBX_0;
2642 mcp->in_mb = MBX_1|MBX_0;
2643 mcp->tov = MBX_TOV_SECONDS;
2644 mcp->flags = 0;
2645 rval = qla2x00_mailbox_command(vha, mcp);
2646 if (rval != QLA_SUCCESS) {
2647 ql_dbg(ql_dbg_mbx, vha, 0x10a8,
2648 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2649 rval, mcp->mb[0], mcp->mb[1]);
2650 } else {
2651 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
2654 return rval;
2658 qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2659 uint16_t buffers, uint16_t *mb, uint32_t *dwords)
2661 int rval;
2662 mbx_cmd_t mc;
2663 mbx_cmd_t *mcp = &mc;
2665 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2667 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
2668 return QLA_FUNCTION_FAILED;
2670 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2671 return QLA_FUNCTION_FAILED;
2673 mcp->mb[0] = MBC_TRACE_CONTROL;
2674 mcp->mb[1] = TC_FCE_ENABLE;
2675 mcp->mb[2] = LSW(fce_dma);
2676 mcp->mb[3] = MSW(fce_dma);
2677 mcp->mb[4] = LSW(MSD(fce_dma));
2678 mcp->mb[5] = MSW(MSD(fce_dma));
2679 mcp->mb[6] = buffers;
2680 mcp->mb[7] = TC_AEN_DISABLE;
2681 mcp->mb[8] = 0;
2682 mcp->mb[9] = TC_FCE_DEFAULT_RX_SIZE;
2683 mcp->mb[10] = TC_FCE_DEFAULT_TX_SIZE;
2684 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2685 MBX_1|MBX_0;
2686 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
2687 mcp->tov = MBX_TOV_SECONDS;
2688 mcp->flags = 0;
2689 rval = qla2x00_mailbox_command(vha, mcp);
2690 if (rval != QLA_SUCCESS) {
2691 ql_dbg(ql_dbg_mbx, vha, 0x10ab,
2692 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2693 rval, mcp->mb[0], mcp->mb[1]);
2694 } else {
2695 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
2697 if (mb)
2698 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
2699 if (dwords)
2700 *dwords = buffers;
2703 return rval;
2707 qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2709 int rval;
2710 mbx_cmd_t mc;
2711 mbx_cmd_t *mcp = &mc;
2713 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
2715 if (!IS_FWI2_CAPABLE(vha->hw))
2716 return QLA_FUNCTION_FAILED;
2718 if (unlikely(pci_channel_offline(vha->hw->pdev)))
2719 return QLA_FUNCTION_FAILED;
2721 mcp->mb[0] = MBC_TRACE_CONTROL;
2722 mcp->mb[1] = TC_FCE_DISABLE;
2723 mcp->mb[2] = TC_FCE_DISABLE_TRACE;
2724 mcp->out_mb = MBX_2|MBX_1|MBX_0;
2725 mcp->in_mb = MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|
2726 MBX_1|MBX_0;
2727 mcp->tov = MBX_TOV_SECONDS;
2728 mcp->flags = 0;
2729 rval = qla2x00_mailbox_command(vha, mcp);
2730 if (rval != QLA_SUCCESS) {
2731 ql_dbg(ql_dbg_mbx, vha, 0x10ae,
2732 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2733 rval, mcp->mb[0], mcp->mb[1]);
2734 } else {
2735 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
2737 if (wr)
2738 *wr = (uint64_t) mcp->mb[5] << 48 |
2739 (uint64_t) mcp->mb[4] << 32 |
2740 (uint64_t) mcp->mb[3] << 16 |
2741 (uint64_t) mcp->mb[2];
2742 if (rd)
2743 *rd = (uint64_t) mcp->mb[9] << 48 |
2744 (uint64_t) mcp->mb[8] << 32 |
2745 (uint64_t) mcp->mb[7] << 16 |
2746 (uint64_t) mcp->mb[6];
2749 return rval;
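/*
 * The FCE write and read pointers are each returned as four 16-bit
 * mailbox registers and reassembled above.  As a worked example, if
 * mb[2..5] were 0x7788, 0x5566, 0x3344 and 0x1122, the resulting *wr
 * would be 0x1122334455667788.
 */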
2753 qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2754 uint16_t *port_speed, uint16_t *mb)
2756 int rval;
2757 mbx_cmd_t mc;
2758 mbx_cmd_t *mcp = &mc;
2760 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
2762 if (!IS_IIDMA_CAPABLE(vha->hw))
2763 return QLA_FUNCTION_FAILED;
2765 mcp->mb[0] = MBC_PORT_PARAMS;
2766 mcp->mb[1] = loop_id;
2767 mcp->mb[2] = mcp->mb[3] = 0;
2768 mcp->mb[9] = vha->vp_idx;
2769 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2770 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2771 mcp->tov = MBX_TOV_SECONDS;
2772 mcp->flags = 0;
2773 rval = qla2x00_mailbox_command(vha, mcp);
2775 /* Return mailbox statuses. */
2776 if (mb != NULL) {
2777 mb[0] = mcp->mb[0];
2778 mb[1] = mcp->mb[1];
2779 mb[3] = mcp->mb[3];
2782 if (rval != QLA_SUCCESS) {
2783 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2784 } else {
2785 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
2786 if (port_speed)
2787 *port_speed = mcp->mb[3];
2790 return rval;
2794 qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2795 uint16_t port_speed, uint16_t *mb)
2797 int rval;
2798 mbx_cmd_t mc;
2799 mbx_cmd_t *mcp = &mc;
2801 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
2803 if (!IS_IIDMA_CAPABLE(vha->hw))
2804 return QLA_FUNCTION_FAILED;
2806 mcp->mb[0] = MBC_PORT_PARAMS;
2807 mcp->mb[1] = loop_id;
2808 mcp->mb[2] = BIT_0;
2809 if (IS_QLA8XXX_TYPE(vha->hw))
2810 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2811 else
2812 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
2813 mcp->mb[9] = vha->vp_idx;
2814 mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
2815 mcp->in_mb = MBX_3|MBX_1|MBX_0;
2816 mcp->tov = MBX_TOV_SECONDS;
2817 mcp->flags = 0;
2818 rval = qla2x00_mailbox_command(vha, mcp);
2820 /* Return mailbox statuses. */
2821 if (mb != NULL) {
2822 mb[0] = mcp->mb[0];
2823 mb[1] = mcp->mb[1];
2824 mb[3] = mcp->mb[3];
2827 if (rval != QLA_SUCCESS) {
2828 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
2829 } else {
2830 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
2833 return rval;
2836 void
2837 qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2838 struct vp_rpt_id_entry_24xx *rptid_entry)
2840 uint8_t vp_idx;
2841 uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
2842 struct qla_hw_data *ha = vha->hw;
2843 scsi_qla_host_t *vp;
2844 unsigned long flags;
2846 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
2848 if (rptid_entry->entry_status != 0)
2849 return;
2851 if (rptid_entry->format == 0) {
2852 ql_dbg(ql_dbg_mbx, vha, 0x10b7,
2853 "Format 0 : Number of VPs setup %d, number of "
2854 "VPs acquired %d.\n",
2855 MSB(le16_to_cpu(rptid_entry->vp_count)),
2856 LSB(le16_to_cpu(rptid_entry->vp_count)));
2857 ql_dbg(ql_dbg_mbx, vha, 0x10b8,
2858 "Primary port id %02x%02x%02x.\n",
2859 rptid_entry->port_id[2], rptid_entry->port_id[1],
2860 rptid_entry->port_id[0]);
2861 } else if (rptid_entry->format == 1) {
2862 vp_idx = LSB(stat);
2863 ql_dbg(ql_dbg_mbx, vha, 0x10b9,
2864 "Format 1: VP[%d] enabled - status %d - with "
2865 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2866 rptid_entry->port_id[2], rptid_entry->port_id[1],
2867 rptid_entry->port_id[0]);
2869 vp = vha;
2870 if (vp_idx == 0 && (MSB(stat) != 1))
2871 goto reg_needed;
2873 if (MSB(stat) == 1) {
2874 ql_dbg(ql_dbg_mbx, vha, 0x10ba,
2875 "Could not acquire ID for VP[%d].\n", vp_idx);
2876 return;
2879 spin_lock_irqsave(&ha->vport_slock, flags);
2880 list_for_each_entry(vp, &ha->vp_list, list)
2881 if (vp_idx == vp->vp_idx)
2882 break;
2883 spin_unlock_irqrestore(&ha->vport_slock, flags);
2885 if (!vp)
2886 return;
2888 vp->d_id.b.domain = rptid_entry->port_id[2];
2889 vp->d_id.b.area = rptid_entry->port_id[1];
2890 vp->d_id.b.al_pa = rptid_entry->port_id[0];
2893 * Cannot configure here as we are still sitting on the
2894 * response queue. Handle it in dpc context.
2896 set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
2898 reg_needed:
2899 set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
2900 set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
2901 set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
2902 qla2xxx_wake_dpc(vha);
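/*
 * In short: format 0 entries are informational (VP counts and the
 * primary port id are only logged), while format 1 entries record the
 * acquired port id for the matching vport and defer the fabric
 * registration work to the DPC thread via the flag bits set above.
 */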
2907 * qla24xx_modify_vp_config
2908 * Change VP configuration for vha
2910 * Input:
2911 * vha = adapter block pointer.
2913 * Returns:
2914 * qla2xxx local function return status code.
2916 * Context:
2917 * Kernel context.
2920 qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2922 int rval;
2923 struct vp_config_entry_24xx *vpmod;
2924 dma_addr_t vpmod_dma;
2925 struct qla_hw_data *ha = vha->hw;
2926 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2928 /* This can be called by the parent */
2930 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
2932 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
2933 if (!vpmod) {
2934 ql_log(ql_log_warn, vha, 0x10bc,
2935 "Failed to allocate modify VP IOCB.\n");
2936 return QLA_MEMORY_ALLOC_FAILED;
2939 memset(vpmod, 0, sizeof(struct vp_config_entry_24xx));
2940 vpmod->entry_type = VP_CONFIG_IOCB_TYPE;
2941 vpmod->entry_count = 1;
2942 vpmod->command = VCT_COMMAND_MOD_ENABLE_VPS;
2943 vpmod->vp_count = 1;
2944 vpmod->vp_index1 = vha->vp_idx;
2945 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
2946 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
2947 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
2948 vpmod->entry_count = 1;
2950 rval = qla2x00_issue_iocb(base_vha, vpmod, vpmod_dma, 0);
2951 if (rval != QLA_SUCCESS) {
2952 ql_dbg(ql_dbg_mbx, vha, 0x10bd,
2953 "Failed to issue VP config IOCB (%x).\n", rval);
2954 } else if (vpmod->entry_status != 0) {
2955 ql_dbg(ql_dbg_mbx, vha, 0x10be,
2956 "Failed to complete IOCB -- error status (%x).\n",
2957 vpmod->entry_status);
2958 rval = QLA_FUNCTION_FAILED;
2959 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
2960 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
2961 "Failed to complete IOCB -- completion status (%x).\n",
2962 le16_to_cpu(vpmod->comp_status));
2963 rval = QLA_FUNCTION_FAILED;
2964 } else {
2965 /* EMPTY */
2966 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
2967 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
2969 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
2971 return rval;
2975 * qla24xx_control_vp
2976 * Enable or disable a virtual port for the given host
2978 * Input:
2979 * vha = virtual adapter block pointer.
2980 * cmd = VP control command; the VP index is taken from vha->vp_idx.
2983 * Returns:
2984 * qla2xxx local function return status code.
2986 * Context:
2987 * Kernel context.
2990 qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
2992 int rval;
2993 int map, pos;
2994 struct vp_ctrl_entry_24xx *vce;
2995 dma_addr_t vce_dma;
2996 struct qla_hw_data *ha = vha->hw;
2997 int vp_index = vha->vp_idx;
2998 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3000 ql_dbg(ql_dbg_mbx, vha, 0x10c1,
3001 "Entered %s enabling index %d.\n", __func__, vp_index);
3003 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
3004 return QLA_PARAMETER_ERROR;
3006 vce = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vce_dma);
3007 if (!vce) {
3008 ql_log(ql_log_warn, vha, 0x10c2,
3009 "Failed to allocate VP control IOCB.\n");
3010 return QLA_MEMORY_ALLOC_FAILED;
3012 memset(vce, 0, sizeof(struct vp_ctrl_entry_24xx));
3014 vce->entry_type = VP_CTRL_IOCB_TYPE;
3015 vce->entry_count = 1;
3016 vce->command = cpu_to_le16(cmd);
3017 vce->vp_count = __constant_cpu_to_le16(1);
3019 /* The index map in firmware starts at 1, so decrement the index;
3020 * this is OK as index 0 is never used.
3022 map = (vp_index - 1) / 8;
3023 pos = (vp_index - 1) & 7;
3024 mutex_lock(&ha->vport_lock);
3025 vce->vp_idx_map[map] |= 1 << pos;
3026 mutex_unlock(&ha->vport_lock);
3028 rval = qla2x00_issue_iocb(base_vha, vce, vce_dma, 0);
3029 if (rval != QLA_SUCCESS) {
3030 ql_dbg(ql_dbg_mbx, vha, 0x10c3,
3031 "Failed to issue VP control IOCB (%x).\n", rval);
3032 } else if (vce->entry_status != 0) {
3033 ql_dbg(ql_dbg_mbx, vha, 0x10c4,
3034 "Failed to complete IOCB -- error status (%x).\n",
3035 vce->entry_status);
3036 rval = QLA_FUNCTION_FAILED;
3037 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
3038 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3039 "Failed to complet IOCB -- completion status (%x).\n",
3040 le16_to_cpu(vce->comp_status));
3041 rval = QLA_FUNCTION_FAILED;
3042 } else {
3043 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
3046 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
3048 return rval;
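/*
 * The vp_idx_map[] bit math above packs VP indices 1..n into a bitmap,
 * eight indices per byte.  For example, vp_index 10 gives
 * map = (10 - 1) / 8 = 1 and pos = (10 - 1) & 7 = 1, i.e.
 * vp_idx_map[1] |= 0x02.
 */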
3052 * qla2x00_send_change_request
3053 * Receive or disable RSCN request from fabric controller
3055 * Input:
3056 * ha = adapter block pointer
3057 * format = registration format:
3058 * 0 - Reserved
3059 * 1 - Fabric detected registration
3060 * 2 - N_port detected registration
3061 * 3 - Full registration
3062 * FF - clear registration
3063 * vp_idx = Virtual port index
3065 * Returns:
3066 * qla2x00 local function return status code.
3068 * Context:
3069 * Kernel Context
3073 qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3074 uint16_t vp_idx)
3076 int rval;
3077 mbx_cmd_t mc;
3078 mbx_cmd_t *mcp = &mc;
3080 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
3083 * This command is implicitly executed by firmware during login for the
3084 * physical hosts
3086 if (vp_idx == 0)
3087 return QLA_FUNCTION_FAILED;
3089 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3090 mcp->mb[1] = format;
3091 mcp->mb[9] = vp_idx;
3092 mcp->out_mb = MBX_9|MBX_1|MBX_0;
3093 mcp->in_mb = MBX_1|MBX_0;
3094 mcp->tov = MBX_TOV_SECONDS;
3095 mcp->flags = 0;
3096 rval = qla2x00_mailbox_command(vha, mcp);
3098 if (rval == QLA_SUCCESS) {
3099 if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
3100 rval = BIT_1;
3102 } else
3103 rval = BIT_1;
3105 return rval;
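/*
 * Hypothetical usage (sketch only): a vport requesting full RSCN
 * registration, using format 3 from the table in the header comment,
 * would do something like:
 *
 *	qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
 */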
3109 qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3110 uint32_t size)
3112 int rval;
3113 mbx_cmd_t mc;
3114 mbx_cmd_t *mcp = &mc;
3116 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
3118 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3119 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
3120 mcp->mb[8] = MSW(addr);
3121 mcp->out_mb = MBX_8|MBX_0;
3122 } else {
3123 mcp->mb[0] = MBC_DUMP_RISC_RAM;
3124 mcp->out_mb = MBX_0;
3126 mcp->mb[1] = LSW(addr);
3127 mcp->mb[2] = MSW(req_dma);
3128 mcp->mb[3] = LSW(req_dma);
3129 mcp->mb[6] = MSW(MSD(req_dma));
3130 mcp->mb[7] = LSW(MSD(req_dma));
3131 mcp->out_mb |= MBX_7|MBX_6|MBX_3|MBX_2|MBX_1;
3132 if (IS_FWI2_CAPABLE(vha->hw)) {
3133 mcp->mb[4] = MSW(size);
3134 mcp->mb[5] = LSW(size);
3135 mcp->out_mb |= MBX_5|MBX_4;
3136 } else {
3137 mcp->mb[4] = LSW(size);
3138 mcp->out_mb |= MBX_4;
3141 mcp->in_mb = MBX_0;
3142 mcp->tov = MBX_TOV_SECONDS;
3143 mcp->flags = 0;
3144 rval = qla2x00_mailbox_command(vha, mcp);
3146 if (rval != QLA_SUCCESS) {
3147 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3148 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3149 } else {
3150 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
3153 return rval;
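/*
 * Command selection above: the extended dump form (used when the address
 * needs more than 16 bits, or always on FWI2-capable chips) carries the
 * upper address word in mb[8].  FWI2-capable chips additionally pass a
 * 32-bit length split across mb[4]/mb[5]; older chips pass a 16-bit
 * length in mb[4].
 */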
3156 /* 84XX Support **************************************************************/
3158 struct cs84xx_mgmt_cmd {
3159 union {
3160 struct verify_chip_entry_84xx req;
3161 struct verify_chip_rsp_84xx rsp;
3162 } p;
3166 qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3168 int rval, retry;
3169 struct cs84xx_mgmt_cmd *mn;
3170 dma_addr_t mn_dma;
3171 uint16_t options;
3172 unsigned long flags;
3173 struct qla_hw_data *ha = vha->hw;
3175 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
3177 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3178 if (mn == NULL) {
3179 return QLA_MEMORY_ALLOC_FAILED;
3182 /* Force Update? */
3183 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
3184 /* Diagnostic firmware? */
3185 /* options |= MENLO_DIAG_FW; */
3186 /* We update the firmware with only one data sequence. */
3187 options |= VCO_END_OF_DATA;
3189 do {
3190 retry = 0;
3191 memset(mn, 0, sizeof(*mn));
3192 mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
3193 mn->p.req.entry_count = 1;
3194 mn->p.req.options = cpu_to_le16(options);
3196 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c,
3197 "Dump of Verify Request.\n");
3198 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e,
3199 (uint8_t *)mn, sizeof(*mn));
3201 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
3202 if (rval != QLA_SUCCESS) {
3203 ql_dbg(ql_dbg_mbx, vha, 0x10cb,
3204 "Failed to issue verify IOCB (%x).\n", rval);
3205 goto verify_done;
3208 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110,
3209 "Dump of Verify Response.\n");
3210 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118,
3211 (uint8_t *)mn, sizeof(*mn));
3213 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3214 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3215 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3216 ql_dbg(ql_dbg_mbx, vha, 0x10ce,
3217 "cs=%x fc=%x.\n", status[0], status[1]);
3219 if (status[0] != CS_COMPLETE) {
3220 rval = QLA_FUNCTION_FAILED;
3221 if (!(options & VCO_DONT_UPDATE_FW)) {
3222 ql_dbg(ql_dbg_mbx, vha, 0x10cf,
3223 "Firmware update failed. Retrying "
3224 "without update firmware.\n");
3225 options |= VCO_DONT_UPDATE_FW;
3226 options &= ~VCO_FORCE_UPDATE;
3227 retry = 1;
3229 } else {
3230 ql_dbg(ql_dbg_mbx, vha, 0x10d0,
3231 "Firmware updated to %x.\n",
3232 le32_to_cpu(mn->p.rsp.fw_ver));
3234 /* NOTE: we only update OP firmware. */
3235 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
3236 ha->cs84xx->op_fw_version =
3237 le32_to_cpu(mn->p.rsp.fw_ver);
3238 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
3239 flags);
3241 } while (retry);
3243 verify_done:
3244 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3246 if (rval != QLA_SUCCESS) {
3247 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
3248 } else {
3249 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
3252 return rval;
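/*
 * Retry behaviour above: if the verify completes with a non-CS_COMPLETE
 * status while a firmware update was being attempted, the request is
 * retried exactly once with VCO_DONT_UPDATE_FW set (and VCO_FORCE_UPDATE
 * cleared), i.e. as a plain verify that leaves the 84xx firmware alone.
 */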
3256 qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3258 int rval;
3259 unsigned long flags;
3260 mbx_cmd_t mc;
3261 mbx_cmd_t *mcp = &mc;
3262 struct device_reg_25xxmq __iomem *reg;
3263 struct qla_hw_data *ha = vha->hw;
3265 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
3267 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3268 mcp->mb[1] = req->options;
3269 mcp->mb[2] = MSW(LSD(req->dma));
3270 mcp->mb[3] = LSW(LSD(req->dma));
3271 mcp->mb[6] = MSW(MSD(req->dma));
3272 mcp->mb[7] = LSW(MSD(req->dma));
3273 mcp->mb[5] = req->length;
3274 if (req->rsp)
3275 mcp->mb[10] = req->rsp->id;
3276 mcp->mb[12] = req->qos;
3277 mcp->mb[11] = req->vp_idx;
3278 mcp->mb[13] = req->rid;
3280 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3281 QLA_QUE_PAGE * req->id);
3283 mcp->mb[4] = req->id;
3284 /* que in ptr index */
3285 mcp->mb[8] = 0;
3286 /* que out ptr index */
3287 mcp->mb[9] = 0;
3288 mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
3289 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3290 mcp->in_mb = MBX_0;
3291 mcp->flags = MBX_DMA_OUT;
3292 mcp->tov = 60;
3294 spin_lock_irqsave(&ha->hardware_lock, flags);
3295 if (!(req->options & BIT_0)) {
3296 WRT_REG_DWORD(&reg->req_q_in, 0);
3297 WRT_REG_DWORD(&reg->req_q_out, 0);
3299 req->req_q_in = &reg->req_q_in;
3300 req->req_q_out = &reg->req_q_out;
3301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3303 rval = qla2x00_mailbox_command(vha, mcp);
3304 if (rval != QLA_SUCCESS) {
3305 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3306 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3307 } else {
3308 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
3311 return rval;
3315 qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3317 int rval;
3318 unsigned long flags;
3319 mbx_cmd_t mc;
3320 mbx_cmd_t *mcp = &mc;
3321 struct device_reg_25xxmq __iomem *reg;
3322 struct qla_hw_data *ha = vha->hw;
3324 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
3326 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3327 mcp->mb[1] = rsp->options;
3328 mcp->mb[2] = MSW(LSD(rsp->dma));
3329 mcp->mb[3] = LSW(LSD(rsp->dma));
3330 mcp->mb[6] = MSW(MSD(rsp->dma));
3331 mcp->mb[7] = LSW(MSD(rsp->dma));
3332 mcp->mb[5] = rsp->length;
3333 mcp->mb[14] = rsp->msix->entry;
3334 mcp->mb[13] = rsp->rid;
3336 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3337 QLA_QUE_PAGE * rsp->id);
3339 mcp->mb[4] = rsp->id;
3340 /* que in ptr index */
3341 mcp->mb[8] = 0;
3342 /* que out ptr index */
3343 mcp->mb[9] = 0;
3344 mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
3345 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3346 mcp->in_mb = MBX_0;
3347 mcp->flags = MBX_DMA_OUT;
3348 mcp->tov = 60;
3350 spin_lock_irqsave(&ha->hardware_lock, flags);
3351 if (!(rsp->options & BIT_0)) {
3352 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3353 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3358 rval = qla2x00_mailbox_command(vha, mcp);
3359 if (rval != QLA_SUCCESS) {
3360 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3361 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3362 } else {
3363 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
3366 return rval;
3370 qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3372 int rval;
3373 mbx_cmd_t mc;
3374 mbx_cmd_t *mcp = &mc;
3376 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
3378 mcp->mb[0] = MBC_IDC_ACK;
3379 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
3380 mcp->out_mb = MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3381 mcp->in_mb = MBX_0;
3382 mcp->tov = MBX_TOV_SECONDS;
3383 mcp->flags = 0;
3384 rval = qla2x00_mailbox_command(vha, mcp);
3386 if (rval != QLA_SUCCESS) {
3387 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3388 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3389 } else {
3390 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
3393 return rval;
3397 qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3399 int rval;
3400 mbx_cmd_t mc;
3401 mbx_cmd_t *mcp = &mc;
3403 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3405 if (!IS_QLA81XX(vha->hw))
3406 return QLA_FUNCTION_FAILED;
3408 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3409 mcp->mb[1] = FAC_OPT_CMD_GET_SECTOR_SIZE;
3410 mcp->out_mb = MBX_1|MBX_0;
3411 mcp->in_mb = MBX_1|MBX_0;
3412 mcp->tov = MBX_TOV_SECONDS;
3413 mcp->flags = 0;
3414 rval = qla2x00_mailbox_command(vha, mcp);
3416 if (rval != QLA_SUCCESS) {
3417 ql_dbg(ql_dbg_mbx, vha, 0x10dd,
3418 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3419 rval, mcp->mb[0], mcp->mb[1]);
3420 } else {
3421 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
3422 *sector_size = mcp->mb[1];
3425 return rval;
3429 qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3431 int rval;
3432 mbx_cmd_t mc;
3433 mbx_cmd_t *mcp = &mc;
3435 if (!IS_QLA81XX(vha->hw))
3436 return QLA_FUNCTION_FAILED;
3438 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
3440 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3441 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
3442 FAC_OPT_CMD_WRITE_PROTECT;
3443 mcp->out_mb = MBX_1|MBX_0;
3444 mcp->in_mb = MBX_1|MBX_0;
3445 mcp->tov = MBX_TOV_SECONDS;
3446 mcp->flags = 0;
3447 rval = qla2x00_mailbox_command(vha, mcp);
3449 if (rval != QLA_SUCCESS) {
3450 ql_dbg(ql_dbg_mbx, vha, 0x10e0,
3451 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3452 rval, mcp->mb[0], mcp->mb[1]);
3453 } else {
3454 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
3457 return rval;
3461 qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3463 int rval;
3464 mbx_cmd_t mc;
3465 mbx_cmd_t *mcp = &mc;
3467 if (!IS_QLA81XX(vha->hw))
3468 return QLA_FUNCTION_FAILED;
3470 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
3472 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3473 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
3474 mcp->mb[2] = LSW(start);
3475 mcp->mb[3] = MSW(start);
3476 mcp->mb[4] = LSW(finish);
3477 mcp->mb[5] = MSW(finish);
3478 mcp->out_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3479 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3480 mcp->tov = MBX_TOV_SECONDS;
3481 mcp->flags = 0;
3482 rval = qla2x00_mailbox_command(vha, mcp);
3484 if (rval != QLA_SUCCESS) {
3485 ql_dbg(ql_dbg_mbx, vha, 0x10e3,
3486 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3487 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3488 } else {
3489 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
3492 return rval;
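/*
 * The three FAC (Flash Access Control) helpers above are meant to be
 * used together.  A hypothetical erase sequence -- sketch only, error
 * handling omitted, with 'start' assumed to be a flash address in the
 * units the firmware expects -- could look like:
 *
 *	uint32_t ssize;
 *
 *	qla81xx_fac_get_sector_size(vha, &ssize);
 *	qla81xx_fac_do_write_enable(vha, 1);
 *	qla81xx_fac_erase_sector(vha, start, start + ssize - 1);
 *	qla81xx_fac_do_write_enable(vha, 0);
 */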
3496 qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3498 int rval = 0;
3499 mbx_cmd_t mc;
3500 mbx_cmd_t *mcp = &mc;
3502 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
3504 mcp->mb[0] = MBC_RESTART_MPI_FW;
3505 mcp->out_mb = MBX_0;
3506 mcp->in_mb = MBX_1|MBX_0;
3507 mcp->tov = MBX_TOV_SECONDS;
3508 mcp->flags = 0;
3509 rval = qla2x00_mailbox_command(vha, mcp);
3511 if (rval != QLA_SUCCESS) {
3512 ql_dbg(ql_dbg_mbx, vha, 0x10e6,
3513 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3514 rval, mcp->mb[0], mcp->mb[1]);
3515 } else {
3516 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
3519 return rval;
3523 qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3524 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3526 int rval;
3527 mbx_cmd_t mc;
3528 mbx_cmd_t *mcp = &mc;
3529 struct qla_hw_data *ha = vha->hw;
3531 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
3533 if (!IS_FWI2_CAPABLE(ha))
3534 return QLA_FUNCTION_FAILED;
3536 if (len == 1)
3537 opt |= BIT_0;
3539 mcp->mb[0] = MBC_READ_SFP;
3540 mcp->mb[1] = dev;
3541 mcp->mb[2] = MSW(sfp_dma);
3542 mcp->mb[3] = LSW(sfp_dma);
3543 mcp->mb[6] = MSW(MSD(sfp_dma));
3544 mcp->mb[7] = LSW(MSD(sfp_dma));
3545 mcp->mb[8] = len;
3546 mcp->mb[9] = off;
3547 mcp->mb[10] = opt;
3548 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3549 mcp->in_mb = MBX_1|MBX_0;
3550 mcp->tov = MBX_TOV_SECONDS;
3551 mcp->flags = 0;
3552 rval = qla2x00_mailbox_command(vha, mcp);
3554 if (opt & BIT_0)
3555 *sfp = mcp->mb[1];
3557 if (rval != QLA_SUCCESS) {
3558 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3559 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3560 } else {
3561 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
3564 return rval;
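/*
 * For single-byte reads (len == 1) BIT_0 is set in the option word and
 * the completion value is taken from mailbox register 1 rather than from
 * the DMA buffer; qla2x00_get_thermal_temp() further below relies on
 * exactly this path.
 */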
3568 qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3569 uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
3571 int rval;
3572 mbx_cmd_t mc;
3573 mbx_cmd_t *mcp = &mc;
3574 struct qla_hw_data *ha = vha->hw;
3576 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
3578 if (!IS_FWI2_CAPABLE(ha))
3579 return QLA_FUNCTION_FAILED;
3581 if (len == 1)
3582 opt |= BIT_0;
3584 if (opt & BIT_0)
3585 len = *sfp;
3587 mcp->mb[0] = MBC_WRITE_SFP;
3588 mcp->mb[1] = dev;
3589 mcp->mb[2] = MSW(sfp_dma);
3590 mcp->mb[3] = LSW(sfp_dma);
3591 mcp->mb[6] = MSW(MSD(sfp_dma));
3592 mcp->mb[7] = LSW(MSD(sfp_dma));
3593 mcp->mb[8] = len;
3594 mcp->mb[9] = off;
3595 mcp->mb[10] = opt;
3596 mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3597 mcp->in_mb = MBX_1|MBX_0;
3598 mcp->tov = MBX_TOV_SECONDS;
3599 mcp->flags = 0;
3600 rval = qla2x00_mailbox_command(vha, mcp);
3602 if (rval != QLA_SUCCESS) {
3603 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3604 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3605 } else {
3606 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
3609 return rval;
3613 qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3614 uint16_t size_in_bytes, uint16_t *actual_size)
3616 int rval;
3617 mbx_cmd_t mc;
3618 mbx_cmd_t *mcp = &mc;
3620 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3622 if (!IS_QLA8XXX_TYPE(vha->hw))
3623 return QLA_FUNCTION_FAILED;
3625 mcp->mb[0] = MBC_GET_XGMAC_STATS;
3626 mcp->mb[2] = MSW(stats_dma);
3627 mcp->mb[3] = LSW(stats_dma);
3628 mcp->mb[6] = MSW(MSD(stats_dma));
3629 mcp->mb[7] = LSW(MSD(stats_dma));
3630 mcp->mb[8] = size_in_bytes >> 2;
3631 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
3632 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3633 mcp->tov = MBX_TOV_SECONDS;
3634 mcp->flags = 0;
3635 rval = qla2x00_mailbox_command(vha, mcp);
3637 if (rval != QLA_SUCCESS) {
3638 ql_dbg(ql_dbg_mbx, vha, 0x10ef,
3639 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3640 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3641 } else {
3642 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
3645 *actual_size = mcp->mb[2] << 2;
3648 return rval;
3652 qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3653 uint16_t size)
3655 int rval;
3656 mbx_cmd_t mc;
3657 mbx_cmd_t *mcp = &mc;
3659 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3661 if (!IS_QLA8XXX_TYPE(vha->hw))
3662 return QLA_FUNCTION_FAILED;
3664 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
3665 mcp->mb[1] = 0;
3666 mcp->mb[2] = MSW(tlv_dma);
3667 mcp->mb[3] = LSW(tlv_dma);
3668 mcp->mb[6] = MSW(MSD(tlv_dma));
3669 mcp->mb[7] = LSW(MSD(tlv_dma));
3670 mcp->mb[8] = size;
3671 mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
3672 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3673 mcp->tov = MBX_TOV_SECONDS;
3674 mcp->flags = 0;
3675 rval = qla2x00_mailbox_command(vha, mcp);
3677 if (rval != QLA_SUCCESS) {
3678 ql_dbg(ql_dbg_mbx, vha, 0x10f2,
3679 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3680 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3681 } else {
3682 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
3685 return rval;
3689 qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3691 int rval;
3692 mbx_cmd_t mc;
3693 mbx_cmd_t *mcp = &mc;
3695 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
3697 if (!IS_FWI2_CAPABLE(vha->hw))
3698 return QLA_FUNCTION_FAILED;
3700 mcp->mb[0] = MBC_READ_RAM_EXTENDED;
3701 mcp->mb[1] = LSW(risc_addr);
3702 mcp->mb[8] = MSW(risc_addr);
3703 mcp->out_mb = MBX_8|MBX_1|MBX_0;
3704 mcp->in_mb = MBX_3|MBX_2|MBX_0;
3705 mcp->tov = 30;
3706 mcp->flags = 0;
3707 rval = qla2x00_mailbox_command(vha, mcp);
3708 if (rval != QLA_SUCCESS) {
3709 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3710 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3711 } else {
3712 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
3713 *data = mcp->mb[3] << 16 | mcp->mb[2];
3716 return rval;
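/*
 * The 32-bit word read back is returned split across two mailbox
 * registers and reassembled above; e.g. mb[3] = 0x1234 and mb[2] = 0x5678
 * yield *data = 0x12345678.
 */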
3720 qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3721 uint16_t *mresp)
3723 int rval;
3724 mbx_cmd_t mc;
3725 mbx_cmd_t *mcp = &mc;
3726 uint32_t iter_cnt = 0x1;
3728 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
3730 memset(mcp->mb, 0, sizeof(mcp->mb));
3731 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
3732 mcp->mb[1] = mreq->options | BIT_6;	/* BIT_6 specifies 64-bit addressing */
3734 /* transfer count */
3735 mcp->mb[10] = LSW(mreq->transfer_size);
3736 mcp->mb[11] = MSW(mreq->transfer_size);
3738 /* send data address */
3739 mcp->mb[14] = LSW(mreq->send_dma);
3740 mcp->mb[15] = MSW(mreq->send_dma);
3741 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3742 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3744 /* receive data address */
3745 mcp->mb[16] = LSW(mreq->rcv_dma);
3746 mcp->mb[17] = MSW(mreq->rcv_dma);
3747 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3748 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3750 /* Iteration count */
3751 mcp->mb[18] = LSW(iter_cnt);
3752 mcp->mb[19] = MSW(iter_cnt);
3754 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3755 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3756 if (IS_QLA8XXX_TYPE(vha->hw))
3757 mcp->out_mb |= MBX_2;
3758 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3760 mcp->buf_size = mreq->transfer_size;
3761 mcp->tov = MBX_TOV_SECONDS;
3762 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3764 rval = qla2x00_mailbox_command(vha, mcp);
3766 if (rval != QLA_SUCCESS) {
3767 ql_dbg(ql_dbg_mbx, vha, 0x10f8,
3768 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[18]=%x "
3769 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3770 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3771 } else {
3772 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
3775 /* Copy mailbox information */
3776 memcpy(mresp, mcp->mb, 64);
3777 return rval;
3781 qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3782 uint16_t *mresp)
3784 int rval;
3785 mbx_cmd_t mc;
3786 mbx_cmd_t *mcp = &mc;
3787 struct qla_hw_data *ha = vha->hw;
3789 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
3791 memset(mcp->mb, 0, sizeof(mcp->mb));
3792 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3793 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3794 if (IS_QLA8XXX_TYPE(ha)) {
3795 mcp->mb[1] |= BIT_15;
3796 mcp->mb[2] = vha->fcoe_fcf_idx;
3798 mcp->mb[16] = LSW(mreq->rcv_dma);
3799 mcp->mb[17] = MSW(mreq->rcv_dma);
3800 mcp->mb[6] = LSW(MSD(mreq->rcv_dma));
3801 mcp->mb[7] = MSW(MSD(mreq->rcv_dma));
3803 mcp->mb[10] = LSW(mreq->transfer_size);
3805 mcp->mb[14] = LSW(mreq->send_dma);
3806 mcp->mb[15] = MSW(mreq->send_dma);
3807 mcp->mb[20] = LSW(MSD(mreq->send_dma));
3808 mcp->mb[21] = MSW(MSD(mreq->send_dma));
3810 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3811 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3812 if (IS_QLA8XXX_TYPE(ha))
3813 mcp->out_mb |= MBX_2;
3815 mcp->in_mb = MBX_0;
3816 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
3817 mcp->in_mb |= MBX_1;
3818 if (IS_QLA8XXX_TYPE(ha))
3819 mcp->in_mb |= MBX_3;
3821 mcp->tov = MBX_TOV_SECONDS;
3822 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3823 mcp->buf_size = mreq->transfer_size;
3825 rval = qla2x00_mailbox_command(vha, mcp);
3827 if (rval != QLA_SUCCESS) {
3828 ql_dbg(ql_dbg_mbx, vha, 0x10fb,
3829 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3830 rval, mcp->mb[0], mcp->mb[1]);
3831 } else {
3832 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
3835 /* Copy mailbox information */
3836 memcpy(mresp, mcp->mb, 64);
3837 return rval;
3841 qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3843 int rval;
3844 mbx_cmd_t mc;
3845 mbx_cmd_t *mcp = &mc;
3847 ql_dbg(ql_dbg_mbx, vha, 0x10fd,
3848 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3850 mcp->mb[0] = MBC_ISP84XX_RESET;
3851 mcp->mb[1] = enable_diagnostic;
3852 mcp->out_mb = MBX_1|MBX_0;
3853 mcp->in_mb = MBX_1|MBX_0;
3854 mcp->tov = MBX_TOV_SECONDS;
3855 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
3856 rval = qla2x00_mailbox_command(vha, mcp);
3858 if (rval != QLA_SUCCESS)
3859 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3860 else
3861 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
3863 return rval;
3867 qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3869 int rval;
3870 mbx_cmd_t mc;
3871 mbx_cmd_t *mcp = &mc;
3873 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
3875 if (!IS_FWI2_CAPABLE(vha->hw))
3876 return QLA_FUNCTION_FAILED;
3878 mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
3879 mcp->mb[1] = LSW(risc_addr);
3880 mcp->mb[2] = LSW(data);
3881 mcp->mb[3] = MSW(data);
3882 mcp->mb[8] = MSW(risc_addr);
3883 mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
3884 mcp->in_mb = MBX_0;
3885 mcp->tov = 30;
3886 mcp->flags = 0;
3887 rval = qla2x00_mailbox_command(vha, mcp);
3888 if (rval != QLA_SUCCESS) {
3889 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3890 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3891 } else {
3892 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
3895 return rval;
3899 qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3901 int rval;
3902 uint32_t stat, timer;
3903 uint16_t mb0 = 0;
3904 struct qla_hw_data *ha = vha->hw;
3905 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3907 rval = QLA_SUCCESS;
3909 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
3911 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
3913 /* Write the MBC data to the registers */
3914 WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
3915 WRT_REG_WORD(&reg->mailbox1, mb[0]);
3916 WRT_REG_WORD(&reg->mailbox2, mb[1]);
3917 WRT_REG_WORD(&reg->mailbox3, mb[2]);
3918 WRT_REG_WORD(&reg->mailbox4, mb[3]);
3920 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
3922 /* Poll for MBC interrupt */
3923 for (timer = 6000000; timer; timer--) {
3924 /* Check for pending interrupts. */
3925 stat = RD_REG_DWORD(&reg->host_status);
3926 if (stat & HSRX_RISC_INT) {
3927 stat &= 0xff;
3929 if (stat == 0x1 || stat == 0x2 ||
3930 stat == 0x10 || stat == 0x11) {
3931 set_bit(MBX_INTERRUPT,
3932 &ha->mbx_cmd_flags);
3933 mb0 = RD_REG_WORD(&reg->mailbox0);
3934 WRT_REG_DWORD(&reg->hccr,
3935 HCCRX_CLR_RISC_INT);
3936 RD_REG_DWORD(&reg->hccr);
3937 break;
3940 udelay(5);
3943 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
3944 rval = mb0 & MBS_MASK;
3945 else
3946 rval = QLA_FUNCTION_FAILED;
3948 if (rval != QLA_SUCCESS) {
3949 ql_dbg(ql_dbg_mbx, vha, 0x1104,
3950 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
3951 } else {
3952 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
3955 return rval;
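/*
 * The polling loop above waits for the RISC interrupt by hand rather
 * than going through qla2x00_mailbox_command(): up to 6,000,000
 * iterations with a 5 microsecond delay each, i.e. roughly a 30 second
 * worst-case timeout.
 */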
3958 qla2x00_get_data_rate(scsi_qla_host_t *vha)
3960 int rval;
3961 mbx_cmd_t mc;
3962 mbx_cmd_t *mcp = &mc;
3963 struct qla_hw_data *ha = vha->hw;
3965 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
3967 if (!IS_FWI2_CAPABLE(ha))
3968 return QLA_FUNCTION_FAILED;
3970 mcp->mb[0] = MBC_DATA_RATE;
3971 mcp->mb[1] = 0;
3972 mcp->out_mb = MBX_1|MBX_0;
3973 mcp->in_mb = MBX_2|MBX_1|MBX_0;
3974 mcp->tov = MBX_TOV_SECONDS;
3975 mcp->flags = 0;
3976 rval = qla2x00_mailbox_command(vha, mcp);
3977 if (rval != QLA_SUCCESS) {
3978 ql_dbg(ql_dbg_mbx, vha, 0x1107,
3979 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3980 } else {
3981 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
3982 if (mcp->mb[1] != 0x7)
3983 ha->link_data_rate = mcp->mb[1];
3986 return rval;
3990 qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
3992 int rval;
3993 mbx_cmd_t mc;
3994 mbx_cmd_t *mcp = &mc;
3995 struct qla_hw_data *ha = vha->hw;
3997 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
3999 if (!IS_QLA81XX(ha))
4000 return QLA_FUNCTION_FAILED;
4001 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4002 mcp->out_mb = MBX_0;
4003 mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4004 mcp->tov = MBX_TOV_SECONDS;
4005 mcp->flags = 0;
4007 rval = qla2x00_mailbox_command(vha, mcp);
4009 if (rval != QLA_SUCCESS) {
4010 ql_dbg(ql_dbg_mbx, vha, 0x110a,
4011 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4012 } else {
4013 /* Copy all bits to preserve original value */
4014 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4016 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
4018 return rval;
4022 qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4024 int rval;
4025 mbx_cmd_t mc;
4026 mbx_cmd_t *mcp = &mc;
4028 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
4030 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4031 /* Copy all bits to preserve original setting */
4032 memcpy(&mcp->mb[1], mb, sizeof(uint16_t) * 4);
4033 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4034 mcp->in_mb = MBX_0;
4035 mcp->tov = MBX_TOV_SECONDS;
4036 mcp->flags = 0;
4037 rval = qla2x00_mailbox_command(vha, mcp);
4039 if (rval != QLA_SUCCESS) {
4040 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4041 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4042 } else
4043 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
4045 return rval;
4050 qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4051 uint16_t *mb)
4053 int rval;
4054 mbx_cmd_t mc;
4055 mbx_cmd_t *mcp = &mc;
4056 struct qla_hw_data *ha = vha->hw;
4058 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
4060 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4061 return QLA_FUNCTION_FAILED;
4063 mcp->mb[0] = MBC_PORT_PARAMS;
4064 mcp->mb[1] = loop_id;
4065 if (ha->flags.fcp_prio_enabled)
4066 mcp->mb[2] = BIT_1;
4067 else
4068 mcp->mb[2] = BIT_2;
4069 mcp->mb[4] = priority & 0xf;
4070 mcp->mb[9] = vha->vp_idx;
4071 mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4072 mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0;
4073 mcp->tov = 30;
4074 mcp->flags = 0;
4075 rval = qla2x00_mailbox_command(vha, mcp);
4076 if (mb != NULL) {
4077 mb[0] = mcp->mb[0];
4078 mb[1] = mcp->mb[1];
4079 mb[3] = mcp->mb[3];
4080 mb[4] = mcp->mb[4];
4083 if (rval != QLA_SUCCESS) {
4084 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4085 } else {
4086 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
4089 return rval;
4093 qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4095 int rval;
4096 uint8_t byte;
4097 struct qla_hw_data *ha = vha->hw;
4099 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
4101 /* Integer part */
4102 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
4103 if (rval != QLA_SUCCESS) {
4104 ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
4105 ha->flags.thermal_supported = 0;
4106 goto fail;
4108 *temp = byte;
4110 /* Fraction part */
4111 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1, BIT_13|BIT_0);
4112 if (rval != QLA_SUCCESS) {
4113 ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
4114 ha->flags.thermal_supported = 0;
4115 goto fail;
4117 *frac = (byte >> 6) * 25;
4119 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
4120 fail:
4121 return rval;
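/*
 * Fraction handling above: only the two most significant bits of the
 * fraction byte are kept, so *frac ends up as 0, 25, 50 or 75 --
 * quarter-degree resolution, presumably expressed in hundredths of a
 * degree.
 */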
4125 qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4127 int rval;
4128 struct qla_hw_data *ha = vha->hw;
4129 mbx_cmd_t mc;
4130 mbx_cmd_t *mcp = &mc;
4132 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
4134 if (!IS_FWI2_CAPABLE(ha))
4135 return QLA_FUNCTION_FAILED;
4137 memset(mcp, 0, sizeof(mbx_cmd_t));
4138 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4139 mcp->mb[1] = 1;
4141 mcp->out_mb = MBX_1|MBX_0;
4142 mcp->in_mb = MBX_0;
4143 mcp->tov = 30;
4144 mcp->flags = 0;
4146 rval = qla2x00_mailbox_command(vha, mcp);
4147 if (rval != QLA_SUCCESS) {
4148 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4149 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4150 } else {
4151 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
4154 return rval;
4158 qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4160 int rval;
4161 struct qla_hw_data *ha = vha->hw;
4162 mbx_cmd_t mc;
4163 mbx_cmd_t *mcp = &mc;
4165 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
4167 if (!IS_QLA82XX(ha))
4168 return QLA_FUNCTION_FAILED;
4170 memset(mcp, 0, sizeof(mbx_cmd_t));
4171 mcp->mb[0] = MBC_TOGGLE_INTERRUPT;
4172 mcp->mb[1] = 0;
4174 mcp->out_mb = MBX_1|MBX_0;
4175 mcp->in_mb = MBX_0;
4176 mcp->tov = 30;
4177 mcp->flags = 0;
4179 rval = qla2x00_mailbox_command(vha, mcp);
4180 if (rval != QLA_SUCCESS) {
4181 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4182 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4183 } else {
4184 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
4187 return rval;